@ai-sdk/google 4.0.0-beta.2 → 4.0.0-beta.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,134 @@
1
1
  # @ai-sdk/google
2
2
 
3
+ ## 4.0.0-beta.20
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [986c6fd]
8
+ - Updated dependencies [493295c]
9
+ - @ai-sdk/provider-utils@5.0.0-beta.8
10
+
11
+ ## 4.0.0-beta.19
12
+
13
+ ### Patch Changes
14
+
15
+ - 01fa606: feat(provider/google): support combining built-in tools with function calling on Gemini 3
16
+
17
+ ## 4.0.0-beta.18
18
+
19
+ ### Patch Changes
20
+
21
+ - 9715ec7: feat(provider/google): add support for service tier parameter
22
+
23
+ ## 4.0.0-beta.17
24
+
25
+ ### Patch Changes
26
+
27
+ - Updated dependencies [1f509d4]
28
+ - @ai-sdk/provider-utils@5.0.0-beta.7
29
+ - @ai-sdk/provider@4.0.0-beta.5
30
+
31
+ ## 4.0.0-beta.16
32
+
33
+ ### Patch Changes
34
+
35
+ - 6190649: chore(provider/google): remove obsolete Google image model
36
+
37
+ ## 4.0.0-beta.15
38
+
39
+ ### Patch Changes
40
+
41
+ - 18c1970: feat(provider/google): Add multimodal tool-result support for Google function responses.
42
+
43
+ Tool results with `output.type = 'content'` now map media parts into
44
+ `functionResponse.parts` for Google models, including `image-data`,
45
+ `file-data`, and base64 `data:` URLs in URL-style content parts.
46
+ Remote HTTP(S) URLs in URL-style tool-result parts are not supported.
47
+
48
+ ## 4.0.0-beta.14
49
+
50
+ ### Patch Changes
51
+
52
+ - 3887c70: feat(provider): add new top-level reasoning parameter to spec and support it in `generateText` and `streamText`
53
+ - Updated dependencies [3887c70]
54
+ - @ai-sdk/provider-utils@5.0.0-beta.6
55
+ - @ai-sdk/provider@4.0.0-beta.4
56
+
57
+ ## 4.0.0-beta.13
58
+
59
+ ### Patch Changes
60
+
61
+ - Updated dependencies [776b617]
62
+ - @ai-sdk/provider-utils@5.0.0-beta.5
63
+ - @ai-sdk/provider@4.0.0-beta.3
64
+
65
+ ## 4.0.0-beta.12
66
+
67
+ ### Patch Changes
68
+
69
+ - Updated dependencies [61753c3]
70
+ - @ai-sdk/provider-utils@5.0.0-beta.4
71
+
72
+ ## 4.0.0-beta.11
73
+
74
+ ### Patch Changes
75
+
76
+ - f7d4f01: feat(provider): add support for `reasoning-file` type for files that are part of reasoning
77
+ - Updated dependencies [f7d4f01]
78
+ - @ai-sdk/provider-utils@5.0.0-beta.3
79
+ - @ai-sdk/provider@4.0.0-beta.2
80
+
81
+ ## 4.0.0-beta.10
82
+
83
+ ### Patch Changes
84
+
85
+ - Updated dependencies [5c2a5a2]
86
+ - @ai-sdk/provider@4.0.0-beta.1
87
+ - @ai-sdk/provider-utils@5.0.0-beta.2
88
+
89
+ ## 4.0.0-beta.9
90
+
91
+ ### Patch Changes
92
+
93
+ - e59176c: chore(google): update v3 specs to v4
94
+
95
+ ## 4.0.0-beta.8
96
+
97
+ ### Patch Changes
98
+
99
+ - 4dac142: feat(google): add new finishMessage field in providerMetadata
100
+
101
+ ## 4.0.0-beta.7
102
+
103
+ ### Patch Changes
104
+
105
+ - 82288b0: feat(provider/google): add `gemini-embedding-2-preview` and fix multimodal embedding support with `embedMany`
106
+
107
+ ## 4.0.0-beta.6
108
+
109
+ ### Patch Changes
110
+
111
+ - add4326: fix(provider/google): correct JSDoc for multimodal embedding content option
112
+
113
+ ## 4.0.0-beta.5
114
+
115
+ ### Patch Changes
116
+
117
+ - ab43029: feat(provider/google): support multimodal content parts in embedding provider options
118
+
119
+ ## 4.0.0-beta.4
120
+
121
+ ### Patch Changes
122
+
123
+ - 2edd14e: fix(provider/google): correctly mark reasoning files as such and fix related multi-turn errors
124
+
125
+ ## 4.0.0-beta.3
126
+
127
+ ### Patch Changes
128
+
129
+ - Updated dependencies [531251e]
130
+ - @ai-sdk/provider-utils@5.0.0-beta.1
131
+
3
132
  ## 4.0.0-beta.2
4
133
 
5
134
  ### Patch Changes
@@ -338,13 +467,13 @@
338
467
  Before
339
468
 
340
469
  ```ts
341
- model.textEmbeddingModel('my-model-id');
470
+ model.textEmbeddingModel("my-model-id");
342
471
  ```
343
472
 
344
473
  After
345
474
 
346
475
  ```ts
347
- model.embeddingModel('my-model-id');
476
+ model.embeddingModel("my-model-id");
348
477
  ```
349
478
 
350
479
  - 2625a04: feat(openai): update spec for mcp approval
@@ -653,13 +782,13 @@
653
782
  Before
654
783
 
655
784
  ```ts
656
- model.textEmbeddingModel('my-model-id');
785
+ model.textEmbeddingModel("my-model-id");
657
786
  ```
658
787
 
659
788
  After
660
789
 
661
790
  ```ts
662
- model.embeddingModel('my-model-id');
791
+ model.embeddingModel("my-model-id");
663
792
  ```
664
793
 
665
794
  - Updated dependencies [8d9e8ad]
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
1
1
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
2
2
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
3
- import { ProviderV3, LanguageModelV3, ImageModelV3, EmbeddingModelV3, Experimental_VideoModelV3 } from '@ai-sdk/provider';
3
+ import { ProviderV4, LanguageModelV4, ImageModelV4, EmbeddingModelV4, Experimental_VideoModelV4 } from '@ai-sdk/provider';
4
4
 
5
5
  declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
6
6
  error: {
@@ -11,7 +11,7 @@ declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
11
11
  }>;
12
12
  type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;
13
13
 
14
- type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-exp-image-generation' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
14
+ type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
15
15
  declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
16
16
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
17
17
  thinkingConfig?: {
@@ -39,6 +39,7 @@ declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
39
39
  longitude: number;
40
40
  } | undefined;
41
41
  } | undefined;
42
+ serviceTier?: "SERVICE_TIER_STANDARD" | "SERVICE_TIER_FLEX" | "SERVICE_TIER_PRIORITY" | undefined;
42
43
  }>;
43
44
  type GoogleLanguageModelOptions = InferSchema<typeof googleLanguageModelOptions>;
44
45
 
@@ -56,6 +57,21 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
56
57
  mimeType: string;
57
58
  data: string;
58
59
  };
60
+ thought?: boolean | null | undefined;
61
+ thoughtSignature?: string | null | undefined;
62
+ } | {
63
+ toolCall: {
64
+ toolType: string;
65
+ id: string;
66
+ args?: unknown;
67
+ };
68
+ thoughtSignature?: string | null | undefined;
69
+ } | {
70
+ toolResponse: {
71
+ toolType: string;
72
+ id: string;
73
+ response?: unknown;
74
+ };
59
75
  thoughtSignature?: string | null | undefined;
60
76
  } | {
61
77
  executableCode?: {
@@ -72,6 +88,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
72
88
  })[] | null | undefined;
73
89
  } | null | undefined;
74
90
  finishReason?: string | null | undefined;
91
+ finishMessage?: string | null | undefined;
75
92
  safetyRatings?: {
76
93
  category?: string | null | undefined;
77
94
  probability?: string | null | undefined;
@@ -153,18 +170,27 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
153
170
  blocked?: boolean | null | undefined;
154
171
  }[] | null | undefined;
155
172
  } | null | undefined;
173
+ serviceTier?: string | null | undefined;
156
174
  }>;
157
175
  type GroundingMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
158
176
  type UrlContextMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
159
177
  type SafetyRatingSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']>[number];
178
+ type PromptFeedbackSchema = NonNullable<InferSchema<typeof responseSchema>['promptFeedback']>;
179
+ type UsageMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['usageMetadata']>;
160
180
 
161
181
  type GoogleGenerativeAIGroundingMetadata = GroundingMetadataSchema;
162
182
  type GoogleGenerativeAIUrlContextMetadata = UrlContextMetadataSchema;
163
183
  type GoogleGenerativeAISafetyRating = SafetyRatingSchema;
184
+ type GoogleGenerativeAIPromptFeedback = PromptFeedbackSchema;
185
+ type GoogleGenerativeAIUsageMetadata = UsageMetadataSchema;
164
186
  interface GoogleGenerativeAIProviderMetadata {
187
+ promptFeedback: GoogleGenerativeAIPromptFeedback | null;
165
188
  groundingMetadata: GoogleGenerativeAIGroundingMetadata | null;
166
189
  urlContextMetadata: GoogleGenerativeAIUrlContextMetadata | null;
167
190
  safetyRatings: GoogleGenerativeAISafetyRating[] | null;
191
+ usageMetadata: GoogleGenerativeAIUsageMetadata | null;
192
+ finishMessage: string | null;
193
+ serviceTier: string | null;
168
194
  }
169
195
 
170
196
  type GoogleGenerativeAIImageModelId = 'imagen-4.0-generate-001' | 'imagen-4.0-ultra-generate-001' | 'imagen-4.0-fast-generate-001' | 'gemini-2.5-flash-image' | 'gemini-3-pro-image-preview' | 'gemini-3.1-flash-image-preview' | (string & {});
@@ -181,10 +207,18 @@ declare const googleImageModelOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
181
207
  }>;
182
208
  type GoogleImageModelOptions = InferSchema<typeof googleImageModelOptionsSchema>;
183
209
 
184
- type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | (string & {});
210
+ type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | 'gemini-embedding-2-preview' | (string & {});
185
211
  declare const googleEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
186
212
  outputDimensionality?: number | undefined;
187
213
  taskType?: "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | undefined;
214
+ content?: (({
215
+ text: string;
216
+ } | {
217
+ inlineData: {
218
+ mimeType: string;
219
+ data: string;
220
+ };
221
+ })[] | null)[] | undefined;
188
222
  }>;
189
223
  type GoogleEmbeddingModelOptions = InferSchema<typeof googleEmbeddingModelOptions>;
190
224
 
@@ -217,7 +251,7 @@ declare const googleTools: {
217
251
  startTime: string;
218
252
  endTime: string;
219
253
  } | undefined;
220
- }>;
254
+ }, {}>;
221
255
  /**
222
256
  * Creates an Enterprise Web Search tool for grounding responses using a compliance-focused web index.
223
257
  * Designed for highly-regulated industries (finance, healthcare, public sector).
@@ -228,7 +262,7 @@ declare const googleTools: {
228
262
  *
229
263
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise
230
264
  */
231
- enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
265
+ enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
232
266
  /**
233
267
  * Creates a Google Maps grounding tool that gives the model access to Google Maps data.
234
268
  * Must have name "google_maps".
@@ -236,12 +270,12 @@ declare const googleTools: {
236
270
  * @see https://ai.google.dev/gemini-api/docs/maps-grounding
237
271
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
238
272
  */
239
- googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
273
+ googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
240
274
  /**
241
275
  * Creates a URL context tool that gives Google direct access to real-time web content.
242
276
  * Must have name "url_context".
243
277
  */
244
- urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
278
+ urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
245
279
  /**
246
280
  * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
247
281
  * Must have name "file_search".
@@ -257,7 +291,7 @@ declare const googleTools: {
257
291
  fileSearchStoreNames: string[];
258
292
  topK?: number | undefined;
259
293
  metadataFilter?: string | undefined;
260
- }>;
294
+ }, {}>;
261
295
  /**
262
296
  * A tool that enables the model to generate and run Python code.
263
297
  * Must have name "code_execution".
@@ -274,7 +308,7 @@ declare const googleTools: {
274
308
  }, {
275
309
  outcome: string;
276
310
  output: string;
277
- }, {}>;
311
+ }, {}, {}>;
278
312
  /**
279
313
  * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
280
314
  * Must have name "vertex_rag_store".
@@ -282,45 +316,45 @@ declare const googleTools: {
282
316
  vertexRagStore: _ai_sdk_provider_utils.ProviderToolFactory<{}, {
283
317
  ragCorpus: string;
284
318
  topK?: number;
285
- }>;
319
+ }, {}>;
286
320
  };
287
321
 
288
- interface GoogleGenerativeAIProvider extends ProviderV3 {
289
- (modelId: GoogleGenerativeAIModelId): LanguageModelV3;
290
- languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
291
- chat(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
322
+ interface GoogleGenerativeAIProvider extends ProviderV4 {
323
+ (modelId: GoogleGenerativeAIModelId): LanguageModelV4;
324
+ languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
325
+ chat(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
292
326
  /**
293
327
  * Creates a model for image generation.
294
328
  */
295
- image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV3;
329
+ image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV4;
296
330
  /**
297
331
  * @deprecated Use `chat()` instead.
298
332
  */
299
- generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
333
+ generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
300
334
  /**
301
335
  * Creates a model for text embeddings.
302
336
  */
303
- embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
337
+ embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
304
338
  /**
305
339
  * Creates a model for text embeddings.
306
340
  */
307
- embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
341
+ embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
308
342
  /**
309
343
  * @deprecated Use `embedding` instead.
310
344
  */
311
- textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
345
+ textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
312
346
  /**
313
347
  * @deprecated Use `embeddingModel` instead.
314
348
  */
315
- textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
349
+ textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
316
350
  /**
317
351
  * Creates a model for video generation.
318
352
  */
319
- video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
353
+ video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
320
354
  /**
321
355
  * Creates a model for video generation.
322
356
  */
323
- videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
357
+ videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
324
358
  tools: typeof googleTools;
325
359
  }
326
360
  interface GoogleGenerativeAIProviderSettings {
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
2
2
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
3
- import { ProviderV3, LanguageModelV3, ImageModelV3, EmbeddingModelV3, Experimental_VideoModelV3 } from '@ai-sdk/provider';
3
+ import { ProviderV4, LanguageModelV4, ImageModelV4, EmbeddingModelV4, Experimental_VideoModelV4 } from '@ai-sdk/provider';
4
4
 
5
5
  declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
6
6
  error: {
@@ -11,7 +11,7 @@ declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
11
11
  }>;
12
12
  type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;
13
13
 
14
- type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-exp-image-generation' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
14
+ type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
15
15
  declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
16
16
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
17
17
  thinkingConfig?: {
@@ -39,6 +39,7 @@ declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
39
39
  longitude: number;
40
40
  } | undefined;
41
41
  } | undefined;
42
+ serviceTier?: "SERVICE_TIER_STANDARD" | "SERVICE_TIER_FLEX" | "SERVICE_TIER_PRIORITY" | undefined;
42
43
  }>;
43
44
  type GoogleLanguageModelOptions = InferSchema<typeof googleLanguageModelOptions>;
44
45
 
@@ -56,6 +57,21 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
56
57
  mimeType: string;
57
58
  data: string;
58
59
  };
60
+ thought?: boolean | null | undefined;
61
+ thoughtSignature?: string | null | undefined;
62
+ } | {
63
+ toolCall: {
64
+ toolType: string;
65
+ id: string;
66
+ args?: unknown;
67
+ };
68
+ thoughtSignature?: string | null | undefined;
69
+ } | {
70
+ toolResponse: {
71
+ toolType: string;
72
+ id: string;
73
+ response?: unknown;
74
+ };
59
75
  thoughtSignature?: string | null | undefined;
60
76
  } | {
61
77
  executableCode?: {
@@ -72,6 +88,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
72
88
  })[] | null | undefined;
73
89
  } | null | undefined;
74
90
  finishReason?: string | null | undefined;
91
+ finishMessage?: string | null | undefined;
75
92
  safetyRatings?: {
76
93
  category?: string | null | undefined;
77
94
  probability?: string | null | undefined;
@@ -153,18 +170,27 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
153
170
  blocked?: boolean | null | undefined;
154
171
  }[] | null | undefined;
155
172
  } | null | undefined;
173
+ serviceTier?: string | null | undefined;
156
174
  }>;
157
175
  type GroundingMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
158
176
  type UrlContextMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
159
177
  type SafetyRatingSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']>[number];
178
+ type PromptFeedbackSchema = NonNullable<InferSchema<typeof responseSchema>['promptFeedback']>;
179
+ type UsageMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['usageMetadata']>;
160
180
 
161
181
  type GoogleGenerativeAIGroundingMetadata = GroundingMetadataSchema;
162
182
  type GoogleGenerativeAIUrlContextMetadata = UrlContextMetadataSchema;
163
183
  type GoogleGenerativeAISafetyRating = SafetyRatingSchema;
184
+ type GoogleGenerativeAIPromptFeedback = PromptFeedbackSchema;
185
+ type GoogleGenerativeAIUsageMetadata = UsageMetadataSchema;
164
186
  interface GoogleGenerativeAIProviderMetadata {
187
+ promptFeedback: GoogleGenerativeAIPromptFeedback | null;
165
188
  groundingMetadata: GoogleGenerativeAIGroundingMetadata | null;
166
189
  urlContextMetadata: GoogleGenerativeAIUrlContextMetadata | null;
167
190
  safetyRatings: GoogleGenerativeAISafetyRating[] | null;
191
+ usageMetadata: GoogleGenerativeAIUsageMetadata | null;
192
+ finishMessage: string | null;
193
+ serviceTier: string | null;
168
194
  }
169
195
 
170
196
  type GoogleGenerativeAIImageModelId = 'imagen-4.0-generate-001' | 'imagen-4.0-ultra-generate-001' | 'imagen-4.0-fast-generate-001' | 'gemini-2.5-flash-image' | 'gemini-3-pro-image-preview' | 'gemini-3.1-flash-image-preview' | (string & {});
@@ -181,10 +207,18 @@ declare const googleImageModelOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
181
207
  }>;
182
208
  type GoogleImageModelOptions = InferSchema<typeof googleImageModelOptionsSchema>;
183
209
 
184
- type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | (string & {});
210
+ type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | 'gemini-embedding-2-preview' | (string & {});
185
211
  declare const googleEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
186
212
  outputDimensionality?: number | undefined;
187
213
  taskType?: "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | undefined;
214
+ content?: (({
215
+ text: string;
216
+ } | {
217
+ inlineData: {
218
+ mimeType: string;
219
+ data: string;
220
+ };
221
+ })[] | null)[] | undefined;
188
222
  }>;
189
223
  type GoogleEmbeddingModelOptions = InferSchema<typeof googleEmbeddingModelOptions>;
190
224
 
@@ -217,7 +251,7 @@ declare const googleTools: {
217
251
  startTime: string;
218
252
  endTime: string;
219
253
  } | undefined;
220
- }>;
254
+ }, {}>;
221
255
  /**
222
256
  * Creates an Enterprise Web Search tool for grounding responses using a compliance-focused web index.
223
257
  * Designed for highly-regulated industries (finance, healthcare, public sector).
@@ -228,7 +262,7 @@ declare const googleTools: {
228
262
  *
229
263
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise
230
264
  */
231
- enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
265
+ enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
232
266
  /**
233
267
  * Creates a Google Maps grounding tool that gives the model access to Google Maps data.
234
268
  * Must have name "google_maps".
@@ -236,12 +270,12 @@ declare const googleTools: {
236
270
  * @see https://ai.google.dev/gemini-api/docs/maps-grounding
237
271
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
238
272
  */
239
- googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
273
+ googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
240
274
  /**
241
275
  * Creates a URL context tool that gives Google direct access to real-time web content.
242
276
  * Must have name "url_context".
243
277
  */
244
- urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
278
+ urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
245
279
  /**
246
280
  * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
247
281
  * Must have name "file_search".
@@ -257,7 +291,7 @@ declare const googleTools: {
257
291
  fileSearchStoreNames: string[];
258
292
  topK?: number | undefined;
259
293
  metadataFilter?: string | undefined;
260
- }>;
294
+ }, {}>;
261
295
  /**
262
296
  * A tool that enables the model to generate and run Python code.
263
297
  * Must have name "code_execution".
@@ -274,7 +308,7 @@ declare const googleTools: {
274
308
  }, {
275
309
  outcome: string;
276
310
  output: string;
277
- }, {}>;
311
+ }, {}, {}>;
278
312
  /**
279
313
  * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
280
314
  * Must have name "vertex_rag_store".
@@ -282,45 +316,45 @@ declare const googleTools: {
282
316
  vertexRagStore: _ai_sdk_provider_utils.ProviderToolFactory<{}, {
283
317
  ragCorpus: string;
284
318
  topK?: number;
285
- }>;
319
+ }, {}>;
286
320
  };
287
321
 
288
- interface GoogleGenerativeAIProvider extends ProviderV3 {
289
- (modelId: GoogleGenerativeAIModelId): LanguageModelV3;
290
- languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
291
- chat(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
322
+ interface GoogleGenerativeAIProvider extends ProviderV4 {
323
+ (modelId: GoogleGenerativeAIModelId): LanguageModelV4;
324
+ languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
325
+ chat(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
292
326
  /**
293
327
  * Creates a model for image generation.
294
328
  */
295
- image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV3;
329
+ image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV4;
296
330
  /**
297
331
  * @deprecated Use `chat()` instead.
298
332
  */
299
- generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
333
+ generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
300
334
  /**
301
335
  * Creates a model for text embeddings.
302
336
  */
303
- embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
337
+ embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
304
338
  /**
305
339
  * Creates a model for text embeddings.
306
340
  */
307
- embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
341
+ embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
308
342
  /**
309
343
  * @deprecated Use `embedding` instead.
310
344
  */
311
- textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
345
+ textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
312
346
  /**
313
347
  * @deprecated Use `embeddingModel` instead.
314
348
  */
315
- textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
349
+ textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
316
350
  /**
317
351
  * Creates a model for video generation.
318
352
  */
319
- video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
353
+ video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
320
354
  /**
321
355
  * Creates a model for video generation.
322
356
  */
323
- videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
357
+ videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
324
358
  tools: typeof googleTools;
325
359
  }
326
360
  interface GoogleGenerativeAIProviderSettings {