@ai-sdk/google 4.0.0-beta.3 → 4.0.0-beta.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,210 @@
1
1
  # @ai-sdk/google
2
2
 
3
+ ## 4.0.0-beta.31
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [176466a]
8
+ - @ai-sdk/provider@4.0.0-beta.10
9
+ - @ai-sdk/provider-utils@5.0.0-beta.16
10
+
11
+ ## 4.0.0-beta.30
12
+
13
+ ### Patch Changes
14
+
15
+ - Updated dependencies [e311194]
16
+ - @ai-sdk/provider@4.0.0-beta.9
17
+ - @ai-sdk/provider-utils@5.0.0-beta.15
18
+
19
+ ## 4.0.0-beta.29
20
+
21
+ ### Patch Changes
22
+
23
+ - Updated dependencies [34bd95d]
24
+ - Updated dependencies [008271d]
25
+ - @ai-sdk/provider@4.0.0-beta.8
26
+ - @ai-sdk/provider-utils@5.0.0-beta.14
27
+
28
+ ## 4.0.0-beta.28
29
+
30
+ ### Patch Changes
31
+
32
+ - Updated dependencies [b0c2869]
33
+ - Updated dependencies [7e26e81]
34
+ - @ai-sdk/provider-utils@5.0.0-beta.13
35
+
36
+ ## 4.0.0-beta.27
37
+
38
+ ### Patch Changes
39
+
40
+ - 46d1149: chore(provider-utils,google): fix grammar errors in error and warning messages
41
+ - Updated dependencies [46d1149]
42
+ - @ai-sdk/provider-utils@5.0.0-beta.12
43
+
44
+ ## 4.0.0-beta.26
45
+
46
+ ### Patch Changes
47
+
48
+ - a05109d: feat(provider/google): preserve per-modality token details in usage data
49
+
50
+ ## 4.0.0-beta.25
51
+
52
+ ### Patch Changes
53
+
54
+ - Updated dependencies [6fd51c0]
55
+ - @ai-sdk/provider-utils@5.0.0-beta.11
56
+ - @ai-sdk/provider@4.0.0-beta.7
57
+
58
+ ## 4.0.0-beta.24
59
+
60
+ ### Patch Changes
61
+
62
+ - 55db546: fix(provider/google): fix Gemini service tier enum after upstream update
63
+
64
+ ## 4.0.0-beta.23
65
+
66
+ ### Patch Changes
67
+
68
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
69
+ - Updated dependencies [c29a26f]
70
+ - @ai-sdk/provider-utils@5.0.0-beta.10
71
+ - @ai-sdk/provider@4.0.0-beta.6
72
+
73
+ ## 4.0.0-beta.22
74
+
75
+ ### Patch Changes
76
+
77
+ - 38fc777: Add AI Gateway hint to provider READMEs
78
+
79
+ ## 4.0.0-beta.21
80
+
81
+ ### Patch Changes
82
+
83
+ - Updated dependencies [2e17091]
84
+ - @ai-sdk/provider-utils@5.0.0-beta.9
85
+
86
+ ## 4.0.0-beta.20
87
+
88
+ ### Patch Changes
89
+
90
+ - Updated dependencies [986c6fd]
91
+ - Updated dependencies [493295c]
92
+ - @ai-sdk/provider-utils@5.0.0-beta.8
93
+
94
+ ## 4.0.0-beta.19
95
+
96
+ ### Patch Changes
97
+
98
+ - 01fa606: feat(provider/google): support combining built-in tools with function calling on Gemini 3
99
+
100
+ ## 4.0.0-beta.18
101
+
102
+ ### Patch Changes
103
+
104
+ - 9715ec7: feat(provider/google): add support for service tier parameter
105
+
106
+ ## 4.0.0-beta.17
107
+
108
+ ### Patch Changes
109
+
110
+ - Updated dependencies [1f509d4]
111
+ - @ai-sdk/provider-utils@5.0.0-beta.7
112
+ - @ai-sdk/provider@4.0.0-beta.5
113
+
114
+ ## 4.0.0-beta.16
115
+
116
+ ### Patch Changes
117
+
118
+ - 6190649: chore(provider/google): remove obsolete Google image model
119
+
120
+ ## 4.0.0-beta.15
121
+
122
+ ### Patch Changes
123
+
124
+ - 18c1970: feat(provider/google): Add multimodal tool-result support for Google function responses.
125
+
126
+ Tool results with `output.type = 'content'` now map media parts into
127
+ `functionResponse.parts` for Google models, including `image-data`,
128
+ `file-data`, and base64 `data:` URLs in URL-style content parts.
129
+ Remote HTTP(S) URLs in URL-style tool-result parts are not supported.
130
+
131
+ ## 4.0.0-beta.14
132
+
133
+ ### Patch Changes
134
+
135
+ - 3887c70: feat(provider): add new top-level reasoning parameter to spec and support it in `generateText` and `streamText`
136
+ - Updated dependencies [3887c70]
137
+ - @ai-sdk/provider-utils@5.0.0-beta.6
138
+ - @ai-sdk/provider@4.0.0-beta.4
139
+
140
+ ## 4.0.0-beta.13
141
+
142
+ ### Patch Changes
143
+
144
+ - Updated dependencies [776b617]
145
+ - @ai-sdk/provider-utils@5.0.0-beta.5
146
+ - @ai-sdk/provider@4.0.0-beta.3
147
+
148
+ ## 4.0.0-beta.12
149
+
150
+ ### Patch Changes
151
+
152
+ - Updated dependencies [61753c3]
153
+ - @ai-sdk/provider-utils@5.0.0-beta.4
154
+
155
+ ## 4.0.0-beta.11
156
+
157
+ ### Patch Changes
158
+
159
+ - f7d4f01: feat(provider): add support for `reasoning-file` type for files that are part of reasoning
160
+ - Updated dependencies [f7d4f01]
161
+ - @ai-sdk/provider-utils@5.0.0-beta.3
162
+ - @ai-sdk/provider@4.0.0-beta.2
163
+
164
+ ## 4.0.0-beta.10
165
+
166
+ ### Patch Changes
167
+
168
+ - Updated dependencies [5c2a5a2]
169
+ - @ai-sdk/provider@4.0.0-beta.1
170
+ - @ai-sdk/provider-utils@5.0.0-beta.2
171
+
172
+ ## 4.0.0-beta.9
173
+
174
+ ### Patch Changes
175
+
176
+ - e59176c: chore(google): update v3 specs to v4
177
+
178
+ ## 4.0.0-beta.8
179
+
180
+ ### Patch Changes
181
+
182
+ - 4dac142: feat(google): add new finishMessage field in providerMetadata
183
+
184
+ ## 4.0.0-beta.7
185
+
186
+ ### Patch Changes
187
+
188
+ - 82288b0: feat(provider/google): add `gemini-embedding-2-preview` and fix multimodal embedding support with `embedMany`
189
+
190
+ ## 4.0.0-beta.6
191
+
192
+ ### Patch Changes
193
+
194
+ - add4326: fix(provider/google): correct JSDoc for multimodal embedding content option
195
+
196
+ ## 4.0.0-beta.5
197
+
198
+ ### Patch Changes
199
+
200
+ - ab43029: feat(provider/google): support multimodal content parts in embedding provider options
201
+
202
+ ## 4.0.0-beta.4
203
+
204
+ ### Patch Changes
205
+
206
+ - 2edd14e: fix(provider/google): correctly mark reasoning files as such and fix related multi-turn errors
207
+
3
208
  ## 4.0.0-beta.3
4
209
 
5
210
  ### Patch Changes
@@ -345,13 +550,13 @@
345
550
  Before
346
551
 
347
552
  ```ts
348
- model.textEmbeddingModel('my-model-id');
553
+ model.textEmbeddingModel("my-model-id");
349
554
  ```
350
555
 
351
556
  After
352
557
 
353
558
  ```ts
354
- model.embeddingModel('my-model-id');
559
+ model.embeddingModel("my-model-id");
355
560
  ```
356
561
 
357
562
  - 2625a04: feat(openai): update spec for mcp approval
@@ -660,13 +865,13 @@
660
865
  Before
661
866
 
662
867
  ```ts
663
- model.textEmbeddingModel('my-model-id');
868
+ model.textEmbeddingModel("my-model-id");
664
869
  ```
665
870
 
666
871
  After
667
872
 
668
873
  ```ts
669
- model.embeddingModel('my-model-id');
874
+ model.embeddingModel("my-model-id");
670
875
  ```
671
876
 
672
877
  - Updated dependencies [8d9e8ad]
package/README.md CHANGED
@@ -2,6 +2,8 @@
2
2
 
3
3
  The **[Google Generative AI provider](https://ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the [Google Generative AI](https://ai.google/discover/generativeai/) APIs.
4
4
 
5
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access Google (and hundreds of models from other providers) — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
6
+
5
7
  ## Setup
6
8
 
7
9
  The Google Generative AI provider is available in the `@ai-sdk/google` module. You can install it with
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
1
1
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
2
2
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
3
- import { ProviderV3, LanguageModelV3, ImageModelV3, EmbeddingModelV3, Experimental_VideoModelV3 } from '@ai-sdk/provider';
3
+ import { ProviderV4, LanguageModelV4, ImageModelV4, EmbeddingModelV4, Experimental_VideoModelV4, FilesV4 } from '@ai-sdk/provider';
4
4
 
5
5
  declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
6
6
  error: {
@@ -11,7 +11,7 @@ declare const googleErrorDataSchema: _ai_sdk_provider_utils.LazySchema<{
11
11
  }>;
12
12
  type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;
13
13
 
14
- type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-exp-image-generation' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
14
+ type GoogleGenerativeAIModelId = 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-flash-lite-001' | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-image' | 'gemini-2.5-flash-lite' | 'gemini-2.5-flash-lite-preview-09-2025' | 'gemini-2.5-flash-preview-tts' | 'gemini-2.5-pro-preview-tts' | 'gemini-2.5-flash-native-audio-latest' | 'gemini-2.5-flash-native-audio-preview-09-2025' | 'gemini-2.5-flash-native-audio-preview-12-2025' | 'gemini-2.5-computer-use-preview-10-2025' | 'gemini-3-pro-preview' | 'gemini-3-pro-image-preview' | 'gemini-3-flash-preview' | 'gemini-3.1-pro-preview' | 'gemini-3.1-pro-preview-customtools' | 'gemini-3.1-flash-image-preview' | 'gemini-3.1-flash-lite-preview' | 'gemini-pro-latest' | 'gemini-flash-latest' | 'gemini-flash-lite-latest' | 'deep-research-pro-preview-12-2025' | 'nano-banana-pro-preview' | 'aqa' | 'gemini-robotics-er-1.5-preview' | 'gemma-3-1b-it' | 'gemma-3-4b-it' | 'gemma-3n-e4b-it' | 'gemma-3n-e2b-it' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | (string & {});
15
15
  declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
16
16
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
17
17
  thinkingConfig?: {
@@ -39,6 +39,7 @@ declare const googleLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
39
39
  longitude: number;
40
40
  } | undefined;
41
41
  } | undefined;
42
+ serviceTier?: "standard" | "flex" | "priority" | undefined;
42
43
  }>;
43
44
  type GoogleLanguageModelOptions = InferSchema<typeof googleLanguageModelOptions>;
44
45
 
@@ -56,6 +57,21 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
56
57
  mimeType: string;
57
58
  data: string;
58
59
  };
60
+ thought?: boolean | null | undefined;
61
+ thoughtSignature?: string | null | undefined;
62
+ } | {
63
+ toolCall: {
64
+ toolType: string;
65
+ id: string;
66
+ args?: unknown;
67
+ };
68
+ thoughtSignature?: string | null | undefined;
69
+ } | {
70
+ toolResponse: {
71
+ toolType: string;
72
+ id: string;
73
+ response?: unknown;
74
+ };
59
75
  thoughtSignature?: string | null | undefined;
60
76
  } | {
61
77
  executableCode?: {
@@ -72,6 +88,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
72
88
  })[] | null | undefined;
73
89
  } | null | undefined;
74
90
  finishReason?: string | null | undefined;
91
+ finishMessage?: string | null | undefined;
75
92
  safetyRatings?: {
76
93
  category?: string | null | undefined;
77
94
  probability?: string | null | undefined;
@@ -141,6 +158,14 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
141
158
  candidatesTokenCount?: number | null | undefined;
142
159
  totalTokenCount?: number | null | undefined;
143
160
  trafficType?: string | null | undefined;
161
+ promptTokensDetails?: {
162
+ modality: string;
163
+ tokenCount: number;
164
+ }[] | null | undefined;
165
+ candidatesTokensDetails?: {
166
+ modality: string;
167
+ tokenCount: number;
168
+ }[] | null | undefined;
144
169
  } | null | undefined;
145
170
  promptFeedback?: {
146
171
  blockReason?: string | null | undefined;
@@ -153,18 +178,27 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
153
178
  blocked?: boolean | null | undefined;
154
179
  }[] | null | undefined;
155
180
  } | null | undefined;
181
+ serviceTier?: string | null | undefined;
156
182
  }>;
157
183
  type GroundingMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
158
184
  type UrlContextMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
159
185
  type SafetyRatingSchema = NonNullable<InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']>[number];
186
+ type PromptFeedbackSchema = NonNullable<InferSchema<typeof responseSchema>['promptFeedback']>;
187
+ type UsageMetadataSchema = NonNullable<InferSchema<typeof responseSchema>['usageMetadata']>;
160
188
 
161
189
  type GoogleGenerativeAIGroundingMetadata = GroundingMetadataSchema;
162
190
  type GoogleGenerativeAIUrlContextMetadata = UrlContextMetadataSchema;
163
191
  type GoogleGenerativeAISafetyRating = SafetyRatingSchema;
192
+ type GoogleGenerativeAIPromptFeedback = PromptFeedbackSchema;
193
+ type GoogleGenerativeAIUsageMetadata = UsageMetadataSchema;
164
194
  interface GoogleGenerativeAIProviderMetadata {
195
+ promptFeedback: GoogleGenerativeAIPromptFeedback | null;
165
196
  groundingMetadata: GoogleGenerativeAIGroundingMetadata | null;
166
197
  urlContextMetadata: GoogleGenerativeAIUrlContextMetadata | null;
167
198
  safetyRatings: GoogleGenerativeAISafetyRating[] | null;
199
+ usageMetadata: GoogleGenerativeAIUsageMetadata | null;
200
+ finishMessage: string | null;
201
+ serviceTier: string | null;
168
202
  }
169
203
 
170
204
  type GoogleGenerativeAIImageModelId = 'imagen-4.0-generate-001' | 'imagen-4.0-ultra-generate-001' | 'imagen-4.0-fast-generate-001' | 'gemini-2.5-flash-image' | 'gemini-3-pro-image-preview' | 'gemini-3.1-flash-image-preview' | (string & {});
@@ -181,10 +215,18 @@ declare const googleImageModelOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
181
215
  }>;
182
216
  type GoogleImageModelOptions = InferSchema<typeof googleImageModelOptionsSchema>;
183
217
 
184
- type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | (string & {});
218
+ type GoogleGenerativeAIEmbeddingModelId = 'gemini-embedding-001' | 'gemini-embedding-2-preview' | (string & {});
185
219
  declare const googleEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
186
220
  outputDimensionality?: number | undefined;
187
221
  taskType?: "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "RETRIEVAL_DOCUMENT" | "RETRIEVAL_QUERY" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | undefined;
222
+ content?: (({
223
+ text: string;
224
+ } | {
225
+ inlineData: {
226
+ mimeType: string;
227
+ data: string;
228
+ };
229
+ })[] | null)[] | undefined;
188
230
  }>;
189
231
  type GoogleEmbeddingModelOptions = InferSchema<typeof googleEmbeddingModelOptions>;
190
232
 
@@ -202,6 +244,13 @@ type GoogleVideoModelOptions = {
202
244
  [key: string]: unknown;
203
245
  };
204
246
 
247
+ type GoogleFilesUploadOptions = {
248
+ displayName?: string | null;
249
+ pollIntervalMs?: number | null;
250
+ pollTimeoutMs?: number | null;
251
+ [key: string]: unknown;
252
+ };
253
+
205
254
  declare const googleTools: {
206
255
  /**
207
256
  * Creates a Google search tool that gives Google direct access to real-time web content.
@@ -217,7 +266,7 @@ declare const googleTools: {
217
266
  startTime: string;
218
267
  endTime: string;
219
268
  } | undefined;
220
- }>;
269
+ }, {}>;
221
270
  /**
222
271
  * Creates an Enterprise Web Search tool for grounding responses using a compliance-focused web index.
223
272
  * Designed for highly-regulated industries (finance, healthcare, public sector).
@@ -228,7 +277,7 @@ declare const googleTools: {
228
277
  *
229
278
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise
230
279
  */
231
- enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
280
+ enterpriseWebSearch: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
232
281
  /**
233
282
  * Creates a Google Maps grounding tool that gives the model access to Google Maps data.
234
283
  * Must have name "google_maps".
@@ -236,12 +285,12 @@ declare const googleTools: {
236
285
  * @see https://ai.google.dev/gemini-api/docs/maps-grounding
237
286
  * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
238
287
  */
239
- googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
288
+ googleMaps: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
240
289
  /**
241
290
  * Creates a URL context tool that gives Google direct access to real-time web content.
242
291
  * Must have name "url_context".
243
292
  */
244
- urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}>;
293
+ urlContext: _ai_sdk_provider_utils.ProviderToolFactory<{}, {}, {}>;
245
294
  /**
246
295
  * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
247
296
  * Must have name "file_search".
@@ -257,7 +306,7 @@ declare const googleTools: {
257
306
  fileSearchStoreNames: string[];
258
307
  topK?: number | undefined;
259
308
  metadataFilter?: string | undefined;
260
- }>;
309
+ }, {}>;
261
310
  /**
262
311
  * A tool that enables the model to generate and run Python code.
263
312
  * Must have name "code_execution".
@@ -274,7 +323,7 @@ declare const googleTools: {
274
323
  }, {
275
324
  outcome: string;
276
325
  output: string;
277
- }, {}>;
326
+ }, {}, {}>;
278
327
  /**
279
328
  * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
280
329
  * Must have name "vertex_rag_store".
@@ -282,45 +331,46 @@ declare const googleTools: {
282
331
  vertexRagStore: _ai_sdk_provider_utils.ProviderToolFactory<{}, {
283
332
  ragCorpus: string;
284
333
  topK?: number;
285
- }>;
334
+ }, {}>;
286
335
  };
287
336
 
288
- interface GoogleGenerativeAIProvider extends ProviderV3 {
289
- (modelId: GoogleGenerativeAIModelId): LanguageModelV3;
290
- languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
291
- chat(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
337
+ interface GoogleGenerativeAIProvider extends ProviderV4 {
338
+ (modelId: GoogleGenerativeAIModelId): LanguageModelV4;
339
+ languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
340
+ chat(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
292
341
  /**
293
342
  * Creates a model for image generation.
294
343
  */
295
- image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV3;
344
+ image(modelId: GoogleGenerativeAIImageModelId, settings?: GoogleGenerativeAIImageSettings): ImageModelV4;
296
345
  /**
297
346
  * @deprecated Use `chat()` instead.
298
347
  */
299
- generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV3;
348
+ generativeAI(modelId: GoogleGenerativeAIModelId): LanguageModelV4;
300
349
  /**
301
350
  * Creates a model for text embeddings.
302
351
  */
303
- embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
352
+ embedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
304
353
  /**
305
354
  * Creates a model for text embeddings.
306
355
  */
307
- embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
356
+ embeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
308
357
  /**
309
358
  * @deprecated Use `embedding` instead.
310
359
  */
311
- textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
360
+ textEmbedding(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
312
361
  /**
313
362
  * @deprecated Use `embeddingModel` instead.
314
363
  */
315
- textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV3;
364
+ textEmbeddingModel(modelId: GoogleGenerativeAIEmbeddingModelId): EmbeddingModelV4;
316
365
  /**
317
366
  * Creates a model for video generation.
318
367
  */
319
- video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
368
+ video(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
320
369
  /**
321
370
  * Creates a model for video generation.
322
371
  */
323
- videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV3;
372
+ videoModel(modelId: GoogleGenerativeAIVideoModelId): Experimental_VideoModelV4;
373
+ files(): FilesV4;
324
374
  tools: typeof googleTools;
325
375
  }
326
376
  interface GoogleGenerativeAIProviderSettings {
@@ -364,4 +414,4 @@ declare const google: GoogleGenerativeAIProvider;
364
414
 
365
415
  declare const VERSION: string;
366
416
 
367
- export { type GoogleEmbeddingModelOptions, type GoogleErrorData, type GoogleEmbeddingModelOptions as GoogleGenerativeAIEmbeddingProviderOptions, type GoogleImageModelOptions as GoogleGenerativeAIImageProviderOptions, type GoogleGenerativeAIProvider, type GoogleGenerativeAIProviderMetadata, type GoogleLanguageModelOptions as GoogleGenerativeAIProviderOptions, type GoogleGenerativeAIProviderSettings, type GoogleGenerativeAIVideoModelId, type GoogleVideoModelOptions as GoogleGenerativeAIVideoProviderOptions, type GoogleImageModelOptions, type GoogleLanguageModelOptions, type GoogleVideoModelOptions, VERSION, createGoogleGenerativeAI, google };
417
+ export { type GoogleEmbeddingModelOptions, type GoogleErrorData, type GoogleFilesUploadOptions, type GoogleEmbeddingModelOptions as GoogleGenerativeAIEmbeddingProviderOptions, type GoogleImageModelOptions as GoogleGenerativeAIImageProviderOptions, type GoogleGenerativeAIProvider, type GoogleGenerativeAIProviderMetadata, type GoogleLanguageModelOptions as GoogleGenerativeAIProviderOptions, type GoogleGenerativeAIProviderSettings, type GoogleGenerativeAIVideoModelId, type GoogleVideoModelOptions as GoogleGenerativeAIVideoProviderOptions, type GoogleImageModelOptions, type GoogleLanguageModelOptions, type GoogleVideoModelOptions, VERSION, createGoogleGenerativeAI, google };