@ai-sdk/google 4.0.0-beta.4 → 4.0.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,280 @@
1
1
  # @ai-sdk/google
2
2
 
3
+ ## 4.0.0-beta.40
4
+
5
+ ### Patch Changes
6
+
7
+ - 55f0938: chore(provider/google): update available models
8
+
9
+ ## 4.0.0-beta.39
10
+
11
+ ### Patch Changes
12
+
13
+ - Updated dependencies [add1126]
14
+ - @ai-sdk/provider-utils@5.0.0-beta.21
15
+
16
+ ## 4.0.0-beta.38
17
+
18
+ ### Patch Changes
19
+
20
+ - b3976a2: Add workflow serialization support to all provider models.
21
+
22
+ **`@ai-sdk/provider-utils`:** New `serializeModel()` helper that extracts only serializable properties from a model instance, filtering out functions and objects containing functions. Third-party provider authors can use this to add workflow support to their own models.
23
+
24
+ **All providers:** `headers` is now optional in provider config types. This is non-breaking — existing code that passes `headers` continues to work. Custom provider implementations that construct model configs manually can now omit `headers`, which is useful when models are deserialized from a workflow step boundary where auth is provided separately.
25
+
26
+ All provider model classes now include `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, enabling them to cross workflow step boundaries without serialization errors.
27
+
28
+ - ff5eba1: feat: roll `image-*` tool output types into their equivalent `file-*` types
29
+ - Updated dependencies [b3976a2]
30
+ - Updated dependencies [ff5eba1]
31
+ - @ai-sdk/provider-utils@5.0.0-beta.20
32
+ - @ai-sdk/provider@4.0.0-beta.12
33
+
34
+ ## 4.0.0-beta.37
35
+
36
+ ### Major Changes
37
+
38
+ - ef992f8: Remove CommonJS exports from all packages. All packages are now ESM-only (`"type": "module"`). Consumers using `require()` must switch to ESM `import` syntax.
39
+
40
+ ### Patch Changes
41
+
42
+ - Updated dependencies [ef992f8]
43
+ - @ai-sdk/provider@4.0.0-beta.11
44
+ - @ai-sdk/provider-utils@5.0.0-beta.19
45
+
46
+ ## 4.0.0-beta.36
47
+
48
+ ### Patch Changes
49
+
50
+ - 9a0a618: fix(google): fix `serviceTier` to be correctly formatted for Vertex API
51
+
52
+ ## 4.0.0-beta.35
53
+
54
+ ### Patch Changes
55
+
56
+ - 90e2d8a: chore: fix unused vars not being flagged by our lint tooling
57
+ - Updated dependencies [90e2d8a]
58
+ - @ai-sdk/provider-utils@5.0.0-beta.18
59
+
60
+ ## 4.0.0-beta.34
61
+
62
+ ### Patch Changes
63
+
64
+ - 5b7e7c2: fix(google-vertex): don't send streamFunctionCallArguments for unary API calls and change default to false
65
+
66
+ ## 4.0.0-beta.33
67
+
68
+ ### Patch Changes
69
+
70
+ - Updated dependencies [3ae1786]
71
+ - @ai-sdk/provider-utils@5.0.0-beta.17
72
+
73
+ ## 4.0.0-beta.32
74
+
75
+ ### Patch Changes
76
+
77
+ - 5036db8: feat(google-vertex): add support for streaming tool arguments input
78
+
79
+ ## 4.0.0-beta.31
80
+
81
+ ### Patch Changes
82
+
83
+ - Updated dependencies [176466a]
84
+ - @ai-sdk/provider@4.0.0-beta.10
85
+ - @ai-sdk/provider-utils@5.0.0-beta.16
86
+
87
+ ## 4.0.0-beta.30
88
+
89
+ ### Patch Changes
90
+
91
+ - Updated dependencies [e311194]
92
+ - @ai-sdk/provider@4.0.0-beta.9
93
+ - @ai-sdk/provider-utils@5.0.0-beta.15
94
+
95
+ ## 4.0.0-beta.29
96
+
97
+ ### Patch Changes
98
+
99
+ - Updated dependencies [34bd95d]
100
+ - Updated dependencies [008271d]
101
+ - @ai-sdk/provider@4.0.0-beta.8
102
+ - @ai-sdk/provider-utils@5.0.0-beta.14
103
+
104
+ ## 4.0.0-beta.28
105
+
106
+ ### Patch Changes
107
+
108
+ - Updated dependencies [b0c2869]
109
+ - Updated dependencies [7e26e81]
110
+ - @ai-sdk/provider-utils@5.0.0-beta.13
111
+
112
+ ## 4.0.0-beta.27
113
+
114
+ ### Patch Changes
115
+
116
+ - 46d1149: chore(provider-utils,google): fix grammar errors in error and warning messages
117
+ - Updated dependencies [46d1149]
118
+ - @ai-sdk/provider-utils@5.0.0-beta.12
119
+
120
+ ## 4.0.0-beta.26
121
+
122
+ ### Patch Changes
123
+
124
+ - a05109d: feat(provider/google): preserve per-modality token details in usage data
125
+
126
+ ## 4.0.0-beta.25
127
+
128
+ ### Patch Changes
129
+
130
+ - Updated dependencies [6fd51c0]
131
+ - @ai-sdk/provider-utils@5.0.0-beta.11
132
+ - @ai-sdk/provider@4.0.0-beta.7
133
+
134
+ ## 4.0.0-beta.24
135
+
136
+ ### Patch Changes
137
+
138
+ - 55db546: fix(provider/google): fix Gemini service tier enum after upstream update
139
+
140
+ ## 4.0.0-beta.23
141
+
142
+ ### Patch Changes
143
+
144
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
145
+ - Updated dependencies [c29a26f]
146
+ - @ai-sdk/provider-utils@5.0.0-beta.10
147
+ - @ai-sdk/provider@4.0.0-beta.6
148
+
149
+ ## 4.0.0-beta.22
150
+
151
+ ### Patch Changes
152
+
153
+ - 38fc777: Add AI Gateway hint to provider READMEs
154
+
155
+ ## 4.0.0-beta.21
156
+
157
+ ### Patch Changes
158
+
159
+ - Updated dependencies [2e17091]
160
+ - @ai-sdk/provider-utils@5.0.0-beta.9
161
+
162
+ ## 4.0.0-beta.20
163
+
164
+ ### Patch Changes
165
+
166
+ - Updated dependencies [986c6fd]
167
+ - Updated dependencies [493295c]
168
+ - @ai-sdk/provider-utils@5.0.0-beta.8
169
+
170
+ ## 4.0.0-beta.19
171
+
172
+ ### Patch Changes
173
+
174
+ - 01fa606: feat(provider/google): support combining built-in tools with function calling on Gemini 3
175
+
176
+ ## 4.0.0-beta.18
177
+
178
+ ### Patch Changes
179
+
180
+ - 9715ec7: feat(provider/google): add support for service tier parameter
181
+
182
+ ## 4.0.0-beta.17
183
+
184
+ ### Patch Changes
185
+
186
+ - Updated dependencies [1f509d4]
187
+ - @ai-sdk/provider-utils@5.0.0-beta.7
188
+ - @ai-sdk/provider@4.0.0-beta.5
189
+
190
+ ## 4.0.0-beta.16
191
+
192
+ ### Patch Changes
193
+
194
+ - 6190649: chore(provider/google): remove obsolete Google image model
195
+
196
+ ## 4.0.0-beta.15
197
+
198
+ ### Patch Changes
199
+
200
+ - 18c1970: feat(provider/google): Add multimodal tool-result support for Google function responses.
201
+
202
+ Tool results with `output.type = 'content'` now map media parts into
203
+ `functionResponse.parts` for Google models, including `image-data`,
204
+ `file-data`, and base64 `data:` URLs in URL-style content parts.
205
+ Remote HTTP(S) URLs in URL-style tool-result parts are not supported.
206
+
207
+ ## 4.0.0-beta.14
208
+
209
+ ### Patch Changes
210
+
211
+ - 3887c70: feat(provider): add new top-level reasoning parameter to spec and support it in `generateText` and `streamText`
212
+ - Updated dependencies [3887c70]
213
+ - @ai-sdk/provider-utils@5.0.0-beta.6
214
+ - @ai-sdk/provider@4.0.0-beta.4
215
+
216
+ ## 4.0.0-beta.13
217
+
218
+ ### Patch Changes
219
+
220
+ - Updated dependencies [776b617]
221
+ - @ai-sdk/provider-utils@5.0.0-beta.5
222
+ - @ai-sdk/provider@4.0.0-beta.3
223
+
224
+ ## 4.0.0-beta.12
225
+
226
+ ### Patch Changes
227
+
228
+ - Updated dependencies [61753c3]
229
+ - @ai-sdk/provider-utils@5.0.0-beta.4
230
+
231
+ ## 4.0.0-beta.11
232
+
233
+ ### Patch Changes
234
+
235
+ - f7d4f01: feat(provider): add support for `reasoning-file` type for files that are part of reasoning
236
+ - Updated dependencies [f7d4f01]
237
+ - @ai-sdk/provider-utils@5.0.0-beta.3
238
+ - @ai-sdk/provider@4.0.0-beta.2
239
+
240
+ ## 4.0.0-beta.10
241
+
242
+ ### Patch Changes
243
+
244
+ - Updated dependencies [5c2a5a2]
245
+ - @ai-sdk/provider@4.0.0-beta.1
246
+ - @ai-sdk/provider-utils@5.0.0-beta.2
247
+
248
+ ## 4.0.0-beta.9
249
+
250
+ ### Patch Changes
251
+
252
+ - e59176c: chore(google): update v3 specs to v4
253
+
254
+ ## 4.0.0-beta.8
255
+
256
+ ### Patch Changes
257
+
258
+ - 4dac142: feat(google): add new finishMessage field in providerMetadata
259
+
260
+ ## 4.0.0-beta.7
261
+
262
+ ### Patch Changes
263
+
264
+ - 82288b0: feat(provider/google): add `gemini-embedding-2-preview` and fix multimodal embedding support with `embedMany`
265
+
266
+ ## 4.0.0-beta.6
267
+
268
+ ### Patch Changes
269
+
270
+ - add4326: fix(provider/google): correct JSDoc for multimodal embedding content option
271
+
272
+ ## 4.0.0-beta.5
273
+
274
+ ### Patch Changes
275
+
276
+ - ab43029: feat(provider/google): support multimodal content parts in embedding provider options
277
+
3
278
  ## 4.0.0-beta.4
4
279
 
5
280
  ### Patch Changes
@@ -351,13 +626,13 @@
351
626
  Before
352
627
 
353
628
  ```ts
354
- model.textEmbeddingModel('my-model-id');
629
+ model.textEmbeddingModel("my-model-id");
355
630
  ```
356
631
 
357
632
  After
358
633
 
359
634
  ```ts
360
- model.embeddingModel('my-model-id');
635
+ model.embeddingModel("my-model-id");
361
636
  ```
362
637
 
363
638
  - 2625a04: feat(openai): update spec for mcp approval
@@ -666,13 +941,13 @@
666
941
  Before
667
942
 
668
943
  ```ts
669
- model.textEmbeddingModel('my-model-id');
944
+ model.textEmbeddingModel("my-model-id");
670
945
  ```
671
946
 
672
947
  After
673
948
 
674
949
  ```ts
675
- model.embeddingModel('my-model-id');
950
+ model.embeddingModel("my-model-id");
676
951
  ```
677
952
 
678
953
  - Updated dependencies [8d9e8ad]
package/README.md CHANGED
@@ -2,6 +2,8 @@
2
2
 
3
3
  The **[Google Generative AI provider](https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Google Generative AI](https://ai.google/discover/generativeai/) APIs.
4
4
 
5
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access Google (and hundreds of models from other providers) — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
6
+
5
7
  ## Setup
6
8
 
7
9
  The Google Generative AI provider is available in the `@ai-sdk/google` module. You can install it with
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
2
2
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
3
- import { ProviderV3, LanguageModelV3, ImageModelV3, EmbeddingModelV3, Experimental_VideoModelV3 } from '@ai-sdk/provider';
3
+ import { ProviderV4, LanguageModelV4, ImageModelV4, EmbeddingModelV4, Experimental_VideoModelV4, FilesV4 } from '@ai-sdk/provider';
4
4
 
5
5
  declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
6
6
  error: {
@@ -11,7 +11,7 @@ declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
11
11
  }>;
12
12
  type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;
13
13
 
14
- type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-exp-image-generation' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
14
+ type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-3.1-flash-tts-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
15
15
  declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
16
16
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
17
17
  thinkingConfig?: {
@@ -39,6 +39,8 @@ declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
39
39
  longitude: number;
40
40
  } | undefined;
41
41
  } | undefined;
42
+ streamFunctionCallArguments?: boolean | undefined;
43
+ serviceTier?: "standard" | "flex" | "priority" | undefined;
42
44
  }>;
43
45
  type GoogleLanguageModelOptions = InferSchema<typeof googleLanguageModelOptions>;
44
46
 
@@ -47,8 +49,17 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
47
49
  content?: Record<string, never> | {
48
50
  parts?: ({
49
51
  functionCall: {
50
- name: string;
51
- args: unknown;
52
+ name?: string | null | undefined;
53
+ args?: unknown;
54
+ partialArgs?: {
55
+ jsonPath: string;
56
+ stringValue?: string | null | undefined;
57
+ numberValue?: number | null | undefined;
58
+ boolValue?: boolean | null | undefined;
59
+ nullValue?: unknown;
60
+ willContinue?: boolean | null | undefined;
61
+ }[] | null | undefined;
62
+ willContinue?: boolean | null | undefined;
52
63
  };
53
64
  thoughtSignature?: string | null | undefined;
54
65
  } | {
@@ -58,6 +69,20 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
58
69
  };
59
70
  thought?: boolean | null | undefined;
60
71
  thoughtSignature?: string | null | undefined;
72
+ } | {
73
+ toolCall: {
74
+ toolType: string;
75
+ id: string;
76
+ args?: unknown;
77
+ };
78
+ thoughtSignature?: string | null | undefined;
79
+ } | {
80
+ toolResponse: {
81
+ toolType: string;
82
+ id: string;
83
+ response?: unknown;
84
+ };
85
+ thoughtSignature?: string | null | undefined;
61
86
  } | {
62
87
  executableCode?: {
63
88
  language: string;
@@ -73,6 +98,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
73
98
  })[] | null | undefined;
74
99
  } | null | undefined;
75
100
  finishReason?: string | null | undefined;
101
+ finishMessage?: string | null | undefined;
76
102
  safetyRatings?: {
77
103
  category?: string | null | undefined;
78
104
  probability?: string | null | undefined;
@@ -142,6 +168,14 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
142
168
  candidatesTokenCount?: number | null | undefined;
143
169
  totalTokenCount?: number | null | undefined;
144
170
  trafficType?: string | null | undefined;
171
+ promptTokensDetails?: {
172
+ modality: string;
173
+ tokenCount: number;
174
+ }[] | null | undefined;
175
+ candidatesTokensDetails?: {
176
+ modality: string;
177
+ tokenCount: number;
178
+ }[] | null | undefined;
145
179
  } | null | undefined;
146
180
  promptFeedback?: {
147
181
  blockReason?: string | null | undefined;
@@ -154,18 +188,27 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
154
188
  blocked?: boolean | null | undefined;
155
189
  }[] | null | undefined;
156
190
  } | null | undefined;
191
+ serviceTier?: string | null | undefined;
157
192
  }>;
158
193
  type GroundingMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
159
194
  type UrlContextMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
160
195
  type SafetyRatingSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']>[number];
196
+ type PromptFeedbackSchema = NonNullable<InferSchema<typeof responseSchema>['promptFeedback']>;
197
+ type UsageMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['usageMetadata']>;
161
198
 
162
199
  type GoogleGenerativeAIGroundingMetadata = GroundingMetadataSchema;
163
200
  type GoogleGenerativeAIUrlContextMetadata = UrlContextMetadataSchema;
164
201
  type GoogleGenerativeAISafetyRating = SafetyRatingSchema;
202
+ type GoogleGenerativeAIPromptFeedback = PromptFeedbackSchema;
203
+ type GoogleGenerativeAIUsageMetadata = UsageMetadataSchema;
165
204
  interface GoogleGenerativeAIProviderMetadata {
205
+ promptFeedback: GoogleGenerativeAIPromptFeedback | null;
166
206
  groundingMetadata: GoogleGenerativeAIGroundingMetadata | null;
167
207
  urlContextMetadata: GoogleGenerativeAIUrlContextMetadata | null;
168
208
  safetyRatings: GoogleGenerativeAISafetyRating[] | null;
209
+ usageMetadata: GoogleGenerativeAIUsageMetadata | null;
210
+ finishMessage: string | null;
211
+ serviceTier: string | null;
169
212
  }
170
213
 
171
214
  type GoogleGenerativeAIImageModelId = 'imagen-4.0-generate-001' | 'imagen-4.0-ultra-generate-001' | 'imagen-4.0-fast-generate-001' | 'gemini-2.5-flash-image' | 'gemini-3-pro-image-preview' | 'gemini-3.1-flash-image-preview' | (string & {});
@@ -182,14 +225,22 @@ declare const googleImageModelOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
182
225
  }>;
183
226
  type GoogleImageModelOptions = InferSchema<typeof googleImageModelOptionsSchema>;
184
227
 
185
- type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | (string & {});
228
+ type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | 'gemini-embedding-2-preview' | (string & {});
186
229
  declare const googleEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
187
230
  outputDimensionality?: number | undefined;
188
231
  taskType?: "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | undefined;
232
+ content?: (({
233
+ text: string;
234
+ } | {
235
+ inlineData: {
236
+ mimeType: string;
237
+ data: string;
238
+ };
239
+ })[] | null)[] | undefined;
189
240
  }>;
190
241
  type GoogleEmbeddingModelOptions = InferSchema<typeof googleEmbeddingModelOptions>;
191
242
 
192
- type GoogleGenerativeAIVideoModelId = 'veo-3.1-fast-generate-preview' | 'veo-3.1-generate-preview' | 'veo-3.1-generate' | 'veo-3.0-generate-001' | 'veo-3.0-fast-generate-001' | 'veo-2.0-generate-001' | (string & {});
243
+ type GoogleGenerativeAIVideoModelId = 'veo-3.1-fast-generate-preview' | 'veo-3.1-generate-preview' | 'veo-3.1-generate' | 'veo-3.1-lite-generate-preview' | 'veo-3.0-generate-001' | 'veo-3.0-fast-generate-001' | 'veo-2.0-generate-001' | (string & {});
193
244
 
194
245
  type GoogleVideoModelOptions = {
195
246
  pollIntervalMs?: number | null;
@@ -203,6 +254,13 @@ type GoogleVideoModelOptions = {
203
254
  [key: string]: unknown;
204
255
  };
205
256
 
257
+ type GoogleFilesUploadOptions = {
258
+ displayName?: string | null;
259
+ pollIntervalMs?: number | null;
260
+ pollTimeoutMs?: number | null;
261
+ [key: string]: unknown;
262
+ };
263
+
206
264
  declare const googleTools: {
207
265
  /**
208
266
  * Creates a Google search tool that gives Google direct access to real-time web content.
@@ -218,7 +276,7 @@ declare const googleTools: {
218
276
  startTime: string;
219
277
  endTime: string;
220
278
  } | undefined;
221
- }>;
279
+ }, {}>;
222
280
  /**
223
281
  * Creates an Enterprise Web Search tool for grounding responses using a compliance-focused web index.
224
282
  * Designed for highly-regulated industries (finance, healthcare, public sector).
@@ -229,7 +287,7 @@ declare const googleTools: {
229
287
  *
230
288
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise
231
289
  */
232
- enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
290
+ enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
233
291
  /**
234
292
  * Creates a Google Maps grounding tool that gives the model access to Google Maps data.
235
293
  * Must have name "google_maps".
@@ -237,12 +295,12 @@ declare const googleTools: {
237
295
  * @see https://ai.google.dev/gemini-api/docs/maps-grounding
238
296
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
239
297
  */
240
- googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
298
+ googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
241
299
  /**
242
300
  * Creates a URL context tool that gives Google direct access to real-time web content.
243
301
  * Must have name "url_context".
244
302
  */
245
- urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
303
+ urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
246
304
  /**
247
305
  * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
248
306
  * Must have name "file_search".
@@ -258,7 +316,7 @@ declare const googleTools: {
258
316
  fileSearchStoreNames: string[];
259
317
  topK?: number | undefined;
260
318
  metadataFilter?: string | undefined;
261
- }>;
319
+ }, {}>;
262
320
  /**
263
321
  * A tool that enables the model to generate and run Python code.
264
322
  * Must have name "code_execution".
@@ -275,7 +333,7 @@ declare const googleTools: {
275
333
  }, {
276
334
  outcome: string;
277
335
  output: string;
278
- }, {}>;
336
+ }, {}, {}>;
279
337
  /**
280
338
  * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
281
339
  * Must have name "vertex_rag_store".
@@ -283,45 +341,46 @@ declare const googleTools: {
283
341
  vertexRagStore: _ai_sdk_provider_utils.ProviderToolFactory<{}, {
284
342
  ragCorpus: string;
285
343
  topK?: number;
286
- }>;
344
+ }, {}>;
287
345
  };
288
346
 
289
- interface GoogleGenerativeAIProvider extends ProviderV3 {
290
- (modelId: GoogleGenerativeAIModelId): LanguageModelV3;
291
- languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
292
- chat(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
347
+ interface GoogleGenerativeAIProvider extends ProviderV4 {
348
+ (modelId: GoogleGenerativeAIModelId): LanguageModelV4;
349
+ languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
350
+ chat(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
293
351
  /**
294
352
  * Creates a model for image generation.
295
353
  */
296
- image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV3;
354
+ image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV4;
297
355
  /**
298
356
  * @deprecated Use `chat()` instead.
299
357
  */
300
- generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
358
+ generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
301
359
  /**
302
360
  * Creates a model for text embeddings.
303
361
  */
304
- embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
362
+ embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
305
363
  /**
306
364
  * Creates a model for text embeddings.
307
365
  */
308
- embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
366
+ embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
309
367
  /**
310
368
  * @deprecated Use `embedding` instead.
311
369
  */
312
- textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
370
+ textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
313
371
  /**
314
372
  * @deprecated Use `embeddingModel` instead.
315
373
  */
316
- textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
374
+ textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
317
375
  /**
318
376
  * Creates a model for video generation.
319
377
  */
320
- video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
378
+ video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
321
379
  /**
322
380
  * Creates a model for video generation.
323
381
  */
324
- videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
382
+ videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
383
+ files(): FilesV4;
325
384
  tools: typeof googleTools;
326
385
  }
327
386
  interface GoogleGenerativeAIProviderSettings {
@@ -365,4 +424,4 @@ declare const google: GoogleGenerativeAIProvider;
365
424
 
366
425
  declare const VERSION: string;
367
426
 
368
- export { type GoogleEmbeddingModelOptions, type GoogleErrorData, type GoogleEmbeddingModelOptions as GoogleGenerativeAIEmbeddingProviderOptions, type GoogleImageModelOptions as GoogleGenerativeAIImageProviderOptions, type GoogleGenerativeAIProvider, type GoogleGenerativeAIProviderMetadata, type GoogleLanguageModelOptions as GoogleGenerativeAIProviderOptions, type GoogleGenerativeAIProviderSettings, type GoogleGenerativeAIVideoModelId, type GoogleVideoModelOptions as GoogleGenerativeAIVideoProviderOptions, type GoogleImageModelOptions, type GoogleLanguageModelOptions, type GoogleVideoModelOptions, VERSION, createGoogleGenerativeAI, google };
427
+ export { type GoogleEmbeddingModelOptions, type GoogleErrorData, type GoogleFilesUploadOptions, type GoogleEmbeddingModelOptions as GoogleGenerativeAIEmbeddingProviderOptions, type GoogleImageModelOptions as GoogleGenerativeAIImageProviderOptions, type GoogleGenerativeAIProvider, type GoogleGenerativeAIProviderMetadata, type GoogleLanguageModelOptions as GoogleGenerativeAIProviderOptions, type GoogleGenerativeAIProviderSettings, type GoogleGenerativeAIVideoModelId, type GoogleVideoModelOptions as GoogleGenerativeAIVideoProviderOptions, type GoogleImageModelOptions, type GoogleLanguageModelOptions, type GoogleVideoModelOptions, VERSION, createGoogleGenerativeAI, google };