@ai-sdk/google 2.0.0-alpha.9 → 2.0.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -26,22 +26,21 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);

  // src/google-provider.ts
- var import_provider4 = require("@ai-sdk/provider");
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");

  // src/google-generative-ai-embedding-model.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_v43 = require("zod/v4");

  // src/google-error.ts
  var import_provider_utils = require("@ai-sdk/provider-utils");
- var import_zod = require("zod");
- var googleErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- code: import_zod.z.number().nullable(),
- message: import_zod.z.string(),
- status: import_zod.z.string()
+ var import_v4 = require("zod/v4");
+ var googleErrorDataSchema = import_v4.z.object({
+ error: import_v4.z.object({
+ code: import_v4.z.number().nullable(),
+ message: import_v4.z.string(),
+ status: import_v4.z.string()
  })
  });
  var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -50,13 +49,13 @@ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
  });

  // src/google-generative-ai-embedding-options.ts
- var import_zod2 = require("zod");
- var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
+ var import_v42 = require("zod/v4");
+ var googleGenerativeAIEmbeddingProviderOptions = import_v42.z.object({
  /**
  * Optional. Optional reduced dimension for the output embedding.
  * If set, excessive values in the output embedding are truncated from the end.
  */
- outputDimensionality: import_zod2.z.number().optional(),
+ outputDimensionality: import_v42.z.number().optional(),
  /**
  * Optional. Specifies the task type for generating embeddings.
  * Supported task types:
@@ -69,7 +68,7 @@ var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
  * - FACT_VERIFICATION: Optimized for verifying factual information.
  * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
  */
- taskType: import_zod2.z.enum([
+ taskType: import_v42.z.enum([
  "SEMANTIC_SIMILARITY",
  "CLASSIFICATION",
  "CLUSTERING",
@@ -145,13 +144,13 @@ var GoogleGenerativeAIEmbeddingModel = class {
  };
  }
  };
- var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
- embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+ var googleGenerativeAITextEmbeddingResponseSchema = import_v43.z.object({
+ embeddings: import_v43.z.array(import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) }))
  });

  // src/google-generative-ai-language-model.ts
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_v47 = require("zod/v4");

  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -247,16 +246,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
  return result;
  }
  function isEmptyObjectSchema(jsonSchema) {
- return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+ return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
  }

  // src/convert-to-google-generative-ai-messages.ts
  var import_provider2 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- function convertToGoogleGenerativeAIMessages(prompt) {
+ function convertToGoogleGenerativeAIMessages(prompt, options) {
+ var _a;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
+ const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
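A quick illustration of the tightened isEmptyObjectSchema check in the hunk above (hedged sketch; the inputs are hypothetical, the results follow directly from the expression shown):

// Hypothetical JSON schemas passed to isEmptyObjectSchema as defined above:
isEmptyObjectSchema({ type: "object" });                                            // true - no properties, no additionalProperties
isEmptyObjectSchema({ type: "object", properties: {} });                            // true - empty properties object
isEmptyObjectSchema({ type: "object", additionalProperties: { type: "string" } });  // false - new: a truthy additionalProperties now disqualifies the schema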
@@ -330,7 +331,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return {
  functionCall: {
  name: part.toolName,
- args: part.args
+ args: part.input
  }
  };
  }
@@ -348,7 +349,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: part.result
+ content: part.output.value
  }
  }
  }))
@@ -357,8 +358,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  }
  }
+ if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+ const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+ contents[0].parts.unshift({ text: systemText + "\n\n" });
+ }
  return {
- systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+ systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
  contents
  };
  }
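In effect, for Gemma-family models the converter above now folds the system text into the first user turn instead of emitting a systemInstruction. A hedged sketch of the request-level difference, with illustrative values only:

// Hedged sketch, assuming a single system prompt "Answer briefly." and one user part "Hi":
// non-Gemma: { systemInstruction: { parts: [{ text: "Answer briefly." }] }, contents: [{ role: "user", parts: [{ text: "Hi" }] }] }
// Gemma:     { systemInstruction: undefined, contents: [{ role: "user", parts: [{ text: "Answer briefly.\n\n" }, { text: "Hi" }] }] }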
@@ -369,29 +374,19 @@ function getModelPath(modelId) {
  }

  // src/google-generative-ai-options.ts
- var import_zod4 = require("zod");
- var dynamicRetrievalConfig = import_zod4.z.object({
- /**
- * The mode of the predictor to be used in dynamic retrieval.
- */
- mode: import_zod4.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
- /**
- * The threshold to be used in dynamic retrieval. If not set, a system default
- * value is used.
- */
- dynamicThreshold: import_zod4.z.number().optional()
- });
- var googleGenerativeAIProviderOptions = import_zod4.z.object({
- responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
- thinkingConfig: import_zod4.z.object({
- thinkingBudget: import_zod4.z.number().optional()
+ var import_v44 = require("zod/v4");
+ var googleGenerativeAIProviderOptions = import_v44.z.object({
+ responseModalities: import_v44.z.array(import_v44.z.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: import_v44.z.object({
+ thinkingBudget: import_v44.z.number().optional(),
+ includeThoughts: import_v44.z.boolean().optional()
  }).optional(),
  /**
  Optional.
  The name of the cached content used as context to serve the prediction.
  Format: cachedContents/{cachedContent}
  */
- cachedContent: import_zod4.z.string().optional(),
+ cachedContent: import_v44.z.string().optional(),
  /**
  * Optional. Enable structured output. Default is true.
  *
@@ -400,13 +395,13 @@ var googleGenerativeAIProviderOptions = import_zod4.z.object({
  * Google Generative AI uses. You can use this to disable
  * structured outputs if you need to.
  */
- structuredOutputs: import_zod4.z.boolean().optional(),
+ structuredOutputs: import_v44.z.boolean().optional(),
  /**
  Optional. A list of unique safety settings for blocking unsafe content.
  */
- safetySettings: import_zod4.z.array(
- import_zod4.z.object({
- category: import_zod4.z.enum([
+ safetySettings: import_v44.z.array(
+ import_v44.z.object({
+ category: import_v44.z.enum([
  "HARM_CATEGORY_UNSPECIFIED",
  "HARM_CATEGORY_HATE_SPEECH",
  "HARM_CATEGORY_DANGEROUS_CONTENT",
@@ -414,7 +409,7 @@ var googleGenerativeAIProviderOptions = import_zod4.z.object({
  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  "HARM_CATEGORY_CIVIC_INTEGRITY"
  ]),
- threshold: import_zod4.z.enum([
+ threshold: import_v44.z.enum([
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -424,7 +419,7 @@ var googleGenerativeAIProviderOptions = import_zod4.z.object({
  ])
  })
  ).optional(),
- threshold: import_zod4.z.enum([
+ threshold: import_v44.z.enum([
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -437,21 +432,7 @@ var googleGenerativeAIProviderOptions = import_zod4.z.object({
  *
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
  */
- audioTimestamp: import_zod4.z.boolean().optional(),
- /**
- Optional. When enabled, the model will use Google search to ground the response.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
- */
- useSearchGrounding: import_zod4.z.boolean().optional(),
- /**
- Optional. Specifies the dynamic retrieval configuration.
-
- @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
- */
- dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ audioTimestamp: import_v44.z.boolean().optional()
  });

  // src/google-prepare-tools.ts
@@ -459,8 +440,6 @@ var import_provider3 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useSearchGrounding,
- dynamicRetrievalConfig: dynamicRetrievalConfig2,
  modelId
  }) {
  var _a;
@@ -468,28 +447,76 @@ function prepareTools({
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
- if (useSearchGrounding) {
+ if (tools == null) {
+ return { tools: void 0, toolConfig: void 0, toolWarnings };
+ }
+ const hasFunctionTools = tools.some((tool) => tool.type === "function");
+ const hasProviderDefinedTools = tools.some(
+ (tool) => tool.type === "provider-defined"
+ );
+ if (hasFunctionTools && hasProviderDefinedTools) {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tools.find((tool) => tool.type === "function"),
+ details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ });
+ }
+ if (hasProviderDefinedTools) {
+ const googleTools2 = {};
+ const providerDefinedTools = tools.filter(
+ (tool) => tool.type === "provider-defined"
+ );
+ providerDefinedTools.forEach((tool) => {
+ switch (tool.id) {
+ case "google.google_search":
+ if (isGemini2) {
+ googleTools2.googleSearch = {};
+ } else if (supportsDynamicRetrieval) {
+ googleTools2.googleSearchRetrieval = {
+ dynamicRetrievalConfig: {
+ mode: tool.args.mode,
+ dynamicThreshold: tool.args.dynamicThreshold
+ }
+ };
+ } else {
+ googleTools2.googleSearchRetrieval = {};
+ }
+ break;
+ case "google.url_context":
+ if (isGemini2) {
+ googleTools2.urlContext = {};
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool,
+ details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ });
  return {
- tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
- },
+ tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
  toolConfig: void 0,
  toolWarnings
  };
  }
- if (tools == null) {
- return { tools: void 0, toolConfig: void 0, toolWarnings };
- }
  const functionDeclarations = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- functionDeclarations.push({
- name: tool.name,
- description: (_a = tool.description) != null ? _a : "",
- parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
- });
+ switch (tool.type) {
+ case "function":
+ functionDeclarations.push({
+ name: tool.name,
+ description: (_a = tool.description) != null ? _a : "",
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -566,12 +593,72 @@ function mapGoogleGenerativeAIFinishReason({
  }
  }

+ // src/tool/google-search.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v45 = require("zod/v4");
+ var groundingChunkSchema = import_v45.z.object({
+ web: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish(),
+ retrievedContext: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish()
+ });
+ var groundingMetadataSchema = import_v45.z.object({
+ webSearchQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+ retrievalQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+ searchEntryPoint: import_v45.z.object({ renderedContent: import_v45.z.string() }).nullish(),
+ groundingChunks: import_v45.z.array(groundingChunkSchema).nullish(),
+ groundingSupports: import_v45.z.array(
+ import_v45.z.object({
+ segment: import_v45.z.object({
+ startIndex: import_v45.z.number().nullish(),
+ endIndex: import_v45.z.number().nullish(),
+ text: import_v45.z.string().nullish()
+ }),
+ segment_text: import_v45.z.string().nullish(),
+ groundingChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+ supportChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+ confidenceScores: import_v45.z.array(import_v45.z.number()).nullish(),
+ confidenceScore: import_v45.z.array(import_v45.z.number()).nullish()
+ })
+ ).nullish(),
+ retrievalMetadata: import_v45.z.union([
+ import_v45.z.object({
+ webDynamicRetrievalScore: import_v45.z.number()
+ }),
+ import_v45.z.object({})
+ ]).nullish()
+ });
+ var googleSearch = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+ id: "google.google_search",
+ name: "google_search",
+ inputSchema: import_v45.z.object({
+ mode: import_v45.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+ dynamicThreshold: import_v45.z.number().default(1)
+ })
+ });
+
+ // src/tool/url-context.ts
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_v46 = require("zod/v4");
+ var urlMetadataSchema = import_v46.z.object({
+ retrievedUrl: import_v46.z.string(),
+ urlRetrievalStatus: import_v46.z.string()
+ });
+ var urlContextMetadataSchema = import_v46.z.object({
+ urlMetadata: import_v46.z.array(urlMetadataSchema)
+ });
+ var urlContext = (0, import_provider_utils5.createProviderDefinedToolFactory)({
+ id: "google.url_context",
+ name: "url_context",
+ inputSchema: import_v46.z.object({})
+ });
+
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
+ var _a;
  this.modelId = modelId;
  this.config = config;
+ this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils6.generateId;
  }
  get provider() {
  return this.config.provider;
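Together with the provider.tools export added at the end of this diff, the factories above become user-facing provider-defined tools. A hedged usage sketch against the AI SDK beta surface; the model id and prompt are placeholders, and function tools cannot be mixed in per the warning in prepareTools above:

import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const result = await generateText({
  model: google("gemini-2.0-flash"), // placeholder model id
  tools: {
    // maps to googleSearch / googleSearchRetrieval in prepareTools above
    google_search: google.tools.googleSearch({}),
    // Gemini 2 models only, per the unsupported-tool warning above
    url_context: google.tools.urlContext({}),
  },
  prompt: "What changed in the latest @ai-sdk/google release?",
});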
@@ -597,21 +684,29 @@ var GoogleGenerativeAILanguageModel = class {
  }) {
  var _a, _b;
  const warnings = [];
- const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
+ const googleOptions = await (0, import_provider_utils6.parseProviderOptions)({
  provider: "google",
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
- const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
+ if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ warnings.push({
+ type: "other",
+ message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ });
+ }
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+ const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+ prompt,
+ { isGemmaModel }
+ );
  const {
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
- dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
  modelId: this.modelId
  });
  return {
@@ -640,9 +735,9 @@ var GoogleGenerativeAILanguageModel = class {
  thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
  },
  contents,
- systemInstruction,
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
  },
@@ -650,41 +745,45 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
- await (0, import_provider_utils4.resolve)(this.config.headers),
+ const mergedHeaders = (0, import_provider_utils6.combineHeaders)(
+ await (0, import_provider_utils6.resolve)(this.config.headers),
  options.headers
  );
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils4.postJsonToApi)({
+ } = await (0, import_provider_utils6.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:generateContent`,
  headers: mergedHeaders,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(responseSchema),
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(responseSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  const candidate = response.candidates[0];
  const content = [];
  const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ const usageMetadata = response.usageMetadata;
  for (const part of parts) {
- if ("text" in part && part.text.length > 0) {
- content.push({ type: "text", text: part.text });
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ content.push({ type: "reasoning", text: part.text });
+ } else {
+ content.push({ type: "text", text: part.text });
+ }
  } else if ("functionCall" in part) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: this.config.generateId(),
  toolName: part.functionCall.name,
- args: JSON.stringify(part.functionCall.args)
+ input: JSON.stringify(part.functionCall.args)
  });
  } else if ("inlineData" in part) {
  content.push({
@@ -701,7 +800,6 @@ var GoogleGenerativeAILanguageModel = class {
  for (const source of sources) {
  content.push(source);
  }
- const usageMetadata = response.usageMetadata;
  return {
  content,
  finishReason: mapGoogleGenerativeAIFinishReason({
@@ -719,7 +817,9 @@ var GoogleGenerativeAILanguageModel = class {
  providerMetadata: {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
+ usageMetadata: usageMetadata != null ? usageMetadata : null
  }
  },
  request: { body },
@@ -733,18 +833,18 @@ var GoogleGenerativeAILanguageModel = class {
  async doStream(options) {
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const headers = (0, import_provider_utils4.combineHeaders)(
- await (0, import_provider_utils4.resolve)(this.config.headers),
+ const headers = (0, import_provider_utils6.combineHeaders)(
+ await (0, import_provider_utils6.resolve)(this.config.headers),
  options.headers
  );
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:streamGenerateContent?alt=sse`,
  headers,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(chunkSchema),
+ successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(chunkSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
@@ -755,8 +855,12 @@ var GoogleGenerativeAILanguageModel = class {
  totalTokens: void 0
  };
  let providerMetadata = void 0;
- const generateId2 = this.config.generateId;
+ const generateId3 = this.config.generateId;
  let hasToolCalls = false;
+ let currentTextBlockId = null;
+ let currentReasoningBlockId = null;
+ let blockCounter = 0;
+ const emittedSourceUrls = /* @__PURE__ */ new Set();
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -764,7 +868,10 @@ var GoogleGenerativeAILanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
@@ -783,10 +890,64 @@ var GoogleGenerativeAILanguageModel = class {
  return;
  }
  const content = candidate.content;
+ const sources = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
+ generateId: generateId3
+ });
+ if (sources != null) {
+ for (const source of sources) {
+ if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+ emittedSourceUrls.add(source.url);
+ controller.enqueue(source);
+ }
+ }
+ }
  if (content != null) {
- const deltaText = getTextFromParts(content.parts);
- if (deltaText != null) {
- controller.enqueue(deltaText);
+ const parts = (_g = content.parts) != null ? _g : [];
+ for (const part of parts) {
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ currentTextBlockId = null;
+ }
+ if (currentReasoningBlockId === null) {
+ currentReasoningBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "reasoning-start",
+ id: currentReasoningBlockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: currentReasoningBlockId,
+ delta: part.text
+ });
+ } else {
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ currentReasoningBlockId = null;
+ }
+ if (currentTextBlockId === null) {
+ currentTextBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "text-start",
+ id: currentTextBlockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: currentTextBlockId,
+ delta: part.text
+ });
+ }
+ }
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
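A hedged sketch of the part sequence the transform above emits for a chunk stream that carries thought parts followed by regular text (ids are illustrative; the closing parts may instead arrive from flush):

// { type: "reasoning-start", id: "0" }
// { type: "reasoning-delta", id: "0", delta: "Considering the question..." }
// { type: "reasoning-end",   id: "0" }  // closed when non-thought text arrives
// { type: "text-start",      id: "1" }
// { type: "text-delta",      id: "1", delta: "Here is the answer." }
// { type: "text-end",        id: "1" }  // closed in flush if still open
// { type: "finish", finishReason, ... }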
@@ -800,23 +961,29 @@ var GoogleGenerativeAILanguageModel = class {
  }
  const toolCallDeltas = getToolCallsFromParts({
  parts: content.parts,
- generateId: generateId2
+ generateId: generateId3
  });
  if (toolCallDeltas != null) {
  for (const toolCall of toolCallDeltas) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
+ type: "tool-input-start",
+ id: toolCall.toolCallId,
+ toolName: toolCall.toolName
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- args: toolCall.args
+ input: toolCall.args
  });
  hasToolCalls = true;
  }
@@ -827,22 +994,31 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_g = extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: generateId2
- })) != null ? _g : [];
- for (const source of sources) {
- controller.enqueue(source);
- }
  providerMetadata = {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
  }
  };
+ if (usageMetadata != null) {
+ providerMetadata.google.usageMetadata = usageMetadata;
+ }
  }
  },
  flush(controller) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ }
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -859,26 +1035,18 @@ var GoogleGenerativeAILanguageModel = class {
  };
  function getToolCallsFromParts({
  parts,
- generateId: generateId2
+ generateId: generateId3
  }) {
  const functionCallParts = parts == null ? void 0 : parts.filter(
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: generateId2(),
+ toolCallId: generateId3(),
  toolName: part.functionCall.name,
  args: JSON.stringify(part.functionCall.args)
  }));
  }
- function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : {
- type: "text",
- text: textParts.map((part) => part.text).join("")
- };
- }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
  (part) => "inlineData" in part
@@ -886,7 +1054,7 @@ function getInlineDataParts(parts) {
  }
  function extractSources({
  groundingMetadata,
- generateId: generateId2
+ generateId: generateId3
  }) {
  var _a;
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -894,107 +1062,196 @@ function extractSources({
  ).map((chunk) => ({
  type: "source",
  sourceType: "url",
- id: generateId2(),
+ id: generateId3(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = import_zod5.z.object({
- role: import_zod5.z.string(),
- parts: import_zod5.z.array(
- import_zod5.z.union([
- import_zod5.z.object({
- text: import_zod5.z.string()
- }),
- import_zod5.z.object({
- functionCall: import_zod5.z.object({
- name: import_zod5.z.string(),
- args: import_zod5.z.unknown()
+ var contentSchema = import_v47.z.object({
+ parts: import_v47.z.array(
+ import_v47.z.union([
+ // note: order matters since text can be fully empty
+ import_v47.z.object({
+ functionCall: import_v47.z.object({
+ name: import_v47.z.string(),
+ args: import_v47.z.unknown()
  })
  }),
- import_zod5.z.object({
- inlineData: import_zod5.z.object({
- mimeType: import_zod5.z.string(),
- data: import_zod5.z.string()
+ import_v47.z.object({
+ inlineData: import_v47.z.object({
+ mimeType: import_v47.z.string(),
+ data: import_v47.z.string()
  })
+ }),
+ import_v47.z.object({
+ text: import_v47.z.string().nullish(),
+ thought: import_v47.z.boolean().nullish()
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = import_zod5.z.object({
- web: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish(),
- retrievedContext: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish()
+ var safetyRatingSchema = import_v47.z.object({
+ category: import_v47.z.string().nullish(),
+ probability: import_v47.z.string().nullish(),
+ probabilityScore: import_v47.z.number().nullish(),
+ severity: import_v47.z.string().nullish(),
+ severityScore: import_v47.z.number().nullish(),
+ blocked: import_v47.z.boolean().nullish()
  });
- var groundingMetadataSchema = import_zod5.z.object({
- webSearchQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
- retrievalQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
- searchEntryPoint: import_zod5.z.object({ renderedContent: import_zod5.z.string() }).nullish(),
- groundingChunks: import_zod5.z.array(groundingChunkSchema).nullish(),
- groundingSupports: import_zod5.z.array(
- import_zod5.z.object({
- segment: import_zod5.z.object({
- startIndex: import_zod5.z.number().nullish(),
- endIndex: import_zod5.z.number().nullish(),
- text: import_zod5.z.string().nullish()
- }),
- segment_text: import_zod5.z.string().nullish(),
- groundingChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
- supportChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
- confidenceScores: import_zod5.z.array(import_zod5.z.number()).nullish(),
- confidenceScore: import_zod5.z.array(import_zod5.z.number()).nullish()
- })
- ).nullish(),
- retrievalMetadata: import_zod5.z.union([
- import_zod5.z.object({
- webDynamicRetrievalScore: import_zod5.z.number()
- }),
- import_zod5.z.object({})
- ]).nullish()
- });
- var safetyRatingSchema = import_zod5.z.object({
- category: import_zod5.z.string().nullish(),
- probability: import_zod5.z.string().nullish(),
- probabilityScore: import_zod5.z.number().nullish(),
- severity: import_zod5.z.string().nullish(),
- severityScore: import_zod5.z.number().nullish(),
- blocked: import_zod5.z.boolean().nullish()
- });
- var usageSchema = import_zod5.z.object({
- cachedContentTokenCount: import_zod5.z.number().nullish(),
- thoughtsTokenCount: import_zod5.z.number().nullish(),
- promptTokenCount: import_zod5.z.number().nullish(),
- candidatesTokenCount: import_zod5.z.number().nullish(),
- totalTokenCount: import_zod5.z.number().nullish()
+ var usageSchema = import_v47.z.object({
+ cachedContentTokenCount: import_v47.z.number().nullish(),
+ thoughtsTokenCount: import_v47.z.number().nullish(),
+ promptTokenCount: import_v47.z.number().nullish(),
+ candidatesTokenCount: import_v47.z.number().nullish(),
+ totalTokenCount: import_v47.z.number().nullish()
  });
- var responseSchema = import_zod5.z.object({
- candidates: import_zod5.z.array(
- import_zod5.z.object({
- content: contentSchema.nullish().or(import_zod5.z.object({}).strict()),
- finishReason: import_zod5.z.string().nullish(),
- safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ var responseSchema = import_v47.z.object({
+ candidates: import_v47.z.array(
+ import_v47.z.object({
+ content: contentSchema.nullish().or(import_v47.z.object({}).strict()),
+ finishReason: import_v47.z.string().nullish(),
+ safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ),
  usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = import_zod5.z.object({
- candidates: import_zod5.z.array(
- import_zod5.z.object({
+ var chunkSchema = import_v47.z.object({
+ candidates: import_v47.z.array(
+ import_v47.z.object({
  content: contentSchema.nullish(),
- finishReason: import_zod5.z.string().nullish(),
- safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ finishReason: import_v47.z.string().nullish(),
+ safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ).nullish(),
  usageMetadata: usageSchema.nullish()
  });

+ // src/google-tools.ts
+ var googleTools = {
+ /**
+ * Creates a Google search tool that gives Google direct access to real-time web content.
+ * Must have name "google_search".
+ */
+ googleSearch,
+ /**
+ * Creates a URL context tool that gives Google direct access to real-time web content.
+ * Must have name "url_context".
+ */
+ urlContext
+ };
+
+ // src/google-generative-ai-image-model.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_v48 = require("zod/v4");
+ var GoogleGenerativeAIImageModel = class {
+ constructor(modelId, settings, config) {
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ this.specificationVersion = "v2";
+ }
+ get maxImagesPerCall() {
+ var _a;
+ return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const {
+ prompt,
+ n = 1,
+ size = "1024x1024",
+ aspectRatio = "1:1",
+ seed,
+ providerOptions,
+ headers,
+ abortSignal
+ } = options;
+ const warnings = [];
+ if (size != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "size",
+ details: "This model does not support the `size` option. Use `aspectRatio` instead."
+ });
+ }
+ if (seed != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "seed",
+ details: "This model does not support the `seed` option through this provider."
+ });
+ }
+ const googleOptions = await (0, import_provider_utils7.parseProviderOptions)({
+ provider: "google",
+ providerOptions,
+ schema: googleImageProviderOptionsSchema
+ });
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const parameters = {
+ sampleCount: n
+ };
+ if (aspectRatio != null) {
+ parameters.aspectRatio = aspectRatio;
+ }
+ if (googleOptions) {
+ Object.assign(parameters, googleOptions);
+ }
+ const body = {
+ instances: [{ prompt }],
+ parameters
+ };
+ const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
+ url: `${this.config.baseURL}/models/${this.modelId}:predict`,
+ headers: (0, import_provider_utils7.combineHeaders)(await (0, import_provider_utils7.resolve)(this.config.headers), headers),
+ body,
+ failedResponseHandler: googleFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ googleImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response.predictions.map(
+ (p) => p.bytesBase64Encoded
+ ),
+ warnings: warnings != null ? warnings : [],
+ providerMetadata: {
+ google: {
+ images: response.predictions.map((prediction) => ({
+ // Add any prediction-specific metadata here
+ }))
+ }
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders
+ }
+ };
+ }
+ };
+ var googleImageResponseSchema = import_v48.z.object({
+ predictions: import_v48.z.array(import_v48.z.object({ bytesBase64Encoded: import_v48.z.string() })).default([])
+ });
+ var googleImageProviderOptionsSchema = import_v48.z.object({
+ personGeneration: import_v48.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+ aspectRatio: import_v48.z.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
+ });
+
  // src/google-provider.ts
  function createGoogleGenerativeAI(options = {}) {
  var _a;
- const baseURL = (_a = (0, import_provider_utils5.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
+ const baseURL = (_a = (0, import_provider_utils8.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
  const getHeaders = () => ({
- "x-goog-api-key": (0, import_provider_utils5.loadApiKey)({
+ "x-goog-api-key": (0, import_provider_utils8.loadApiKey)({
  apiKey: options.apiKey,
  environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
  description: "Google Generative AI"
@@ -1007,7 +1264,7 @@ function createGoogleGenerativeAI(options = {}) {
  provider: "google.generative-ai",
  baseURL,
  headers: getHeaders,
- generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils5.generateId,
+ generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils8.generateId,
  supportedUrls: () => ({
  "*": [
  // Only allow requests to the Google Generative Language "files" endpoint
@@ -1024,6 +1281,12 @@ function createGoogleGenerativeAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
+ const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
+ provider: "google.generative-ai",
+ baseURL,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
  const provider = function(modelId) {
  if (new.target) {
  throw new Error(
@@ -1038,9 +1301,9 @@ function createGoogleGenerativeAI(options = {}) {
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
- provider.imageModel = (modelId) => {
- throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
- };
+ provider.image = createImageModel;
+ provider.imageModel = createImageModel;
+ provider.tools = googleTools;
  return provider;
  }
  var google = createGoogleGenerativeAI();
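A hedged sketch of the new image entry points wired up above (provider.image / provider.imageModel); the Imagen model id is a placeholder and experimental_generateImage is assumed to be the image helper exposed by the ai package in the 5.0 beta:

import { google } from "@ai-sdk/google";
import { experimental_generateImage as generateImage } from "ai";

const { images } = await generateImage({
  model: google.image("imagen-3.0-generate-002", { maxImagesPerCall: 2 }), // placeholder model id
  prompt: "A watercolor lighthouse at dawn",
  n: 2,
  aspectRatio: "16:9", // forwarded as parameters.aspectRatio
  providerOptions: {
    google: { personGeneration: "dont_allow" }, // validated by googleImageProviderOptionsSchema above
  },
});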