@ai-sdk/google 3.0.24 → 3.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -108,6 +108,8 @@ Google Generative AI also supports some model specific settings that are not par
108
108
  You can pass them as an options argument:
109
109
 
110
110
  ```ts
111
+ import { google, type GoogleLanguageModelOptions } from '@ai-sdk/google';
112
+
111
113
  const model = google('gemini-2.5-flash');
112
114
 
113
115
  await generateText({
@@ -120,7 +122,7 @@ await generateText({
120
122
  threshold: 'BLOCK_LOW_AND_ABOVE',
121
123
  },
122
124
  ],
123
- },
125
+ } satisfies GoogleLanguageModelOptions,
124
126
  },
125
127
  });
126
128
  ```
@@ -256,7 +258,7 @@ The Gemini 2.5 and Gemini 3 series models use an internal "thinking process" tha
256
258
  For Gemini 3 models, use the `thinkingLevel` parameter to control the depth of reasoning:
257
259
 
258
260
  ```ts
259
- import { google, GoogleGenerativeAIProviderOptions } from '@ai-sdk/google';
261
+ import { google, GoogleLanguageModelOptions } from '@ai-sdk/google';
260
262
  import { generateText } from 'ai';
261
263
 
262
264
  const model = google('gemini-3-pro-preview');
@@ -270,7 +272,7 @@ const { text, reasoning } = await generateText({
270
272
  thinkingLevel: 'high',
271
273
  includeThoughts: true,
272
274
  },
273
- } satisfies GoogleGenerativeAIProviderOptions,
275
+ } satisfies GoogleLanguageModelOptions,
274
276
  },
275
277
  });
276
278
 
@@ -284,7 +286,7 @@ console.log(reasoning); // Reasoning summary
284
286
  For Gemini 2.5 models, use the `thinkingBudget` parameter to control the number of thinking tokens:
285
287
 
286
288
  ```ts
287
- import { google, GoogleGenerativeAIProviderOptions } from '@ai-sdk/google';
289
+ import { google, GoogleLanguageModelOptions } from '@ai-sdk/google';
288
290
  import { generateText } from 'ai';
289
291
 
290
292
  const model = google('gemini-2.5-flash');
@@ -298,7 +300,7 @@ const { text, reasoning } = await generateText({
298
300
  thinkingBudget: 8192,
299
301
  includeThoughts: true,
300
302
  },
301
- } satisfies GoogleGenerativeAIProviderOptions,
303
+ } satisfies GoogleLanguageModelOptions,
302
304
  },
303
305
  });
304
306
 
@@ -435,7 +437,7 @@ console.log('Cached tokens:', providerMetadata.google);
435
437
  For guaranteed cost savings, you can still use explicit caching with Gemini 2.5 and 2.0 models. See the [models page](https://ai.google.dev/gemini-api/docs/models) to check if caching is supported for the used model:
436
438
 
437
439
  ```ts
438
- import { google } from '@ai-sdk/google';
440
+ import { google, type GoogleLanguageModelOptions } from '@ai-sdk/google';
439
441
  import { GoogleGenAI } from '@google/genai';
440
442
  import { generateText } from 'ai';
441
443
 
@@ -465,7 +467,7 @@ const { text: veggieLasagnaRecipe } = await generateText({
465
467
  providerOptions: {
466
468
  google: {
467
469
  cachedContent: cache.name,
468
- },
470
+ } satisfies GoogleLanguageModelOptions,
469
471
  },
470
472
  });
471
473
 
@@ -475,7 +477,7 @@ const { text: meatLasagnaRecipe } = await generateText({
475
477
  providerOptions: {
476
478
  google: {
477
479
  cachedContent: cache.name,
478
- },
480
+ } satisfies GoogleLanguageModelOptions,
479
481
  },
480
482
  });
481
483
  ```
@@ -766,7 +768,7 @@ With [Google Maps grounding](https://ai.google.dev/gemini-api/docs/maps-groundin
766
768
  the model has access to Google Maps data for location-aware responses. This enables providing local data and geospatial context, such as finding nearby restaurants.
767
769
 
768
770
  ```ts highlight="7-16"
769
- import { google } from '@ai-sdk/google';
771
+ import { google, type GoogleLanguageModelOptions } from '@ai-sdk/google';
770
772
  import { GoogleGenerativeAIProviderMetadata } from '@ai-sdk/google';
771
773
  import { generateText } from 'ai';
772
774
 
@@ -780,7 +782,7 @@ const { text, sources, providerMetadata } = await generateText({
780
782
  retrievalConfig: {
781
783
  latLng: { latitude: 34.090199, longitude: -117.881081 },
782
784
  },
783
- },
785
+ } satisfies GoogleLanguageModelOptions,
784
786
  },
785
787
  prompt:
786
788
  'What are the best Italian restaurants within a 15-minute walk from here?',
@@ -1008,7 +1010,7 @@ const { object } = await generateObject({
1008
1010
  providerOptions: {
1009
1011
  google: {
1010
1012
  structuredOutputs: false,
1011
- },
1013
+ } satisfies GoogleLanguageModelOptions,
1012
1014
  },
1013
1015
  schema: z.object({
1014
1016
  name: z.string(),
@@ -1099,7 +1101,7 @@ The Google Generative AI provider sends API calls to the right endpoint based on
1099
1101
  Google Generative AI embedding models support additional settings. You can pass them as an options argument:
1100
1102
 
1101
1103
  ```ts
1102
- import { google } from '@ai-sdk/google';
1104
+ import { google, type GoogleEmbeddingModelOptions } from '@ai-sdk/google';
1103
1105
  import { embed } from 'ai';
1104
1106
 
1105
1107
  const model = google.embedding('gemini-embedding-001');
@@ -1111,7 +1113,7 @@ const { embedding } = await embed({
1111
1113
  google: {
1112
1114
  outputDimensionality: 512, // optional, number of dimensions for the embedding
1113
1115
  taskType: 'SEMANTIC_SIMILARITY', // optional, specifies the task type for generating embeddings
1114
- },
1116
+ } satisfies GoogleEmbeddingModelOptions,
1115
1117
  },
1116
1118
  });
1117
1119
  ```
@@ -1158,11 +1160,11 @@ const { image } = await generateImage({
1158
1160
  });
1159
1161
  ```
1160
1162
 
1161
- Further configuration can be done using Google provider options. You can validate the provider options using the `GoogleGenerativeAIImageProviderOptions` type.
1163
+ Further configuration can be done using Google provider options. You can validate the provider options using the `GoogleImageModelOptions` type.
1162
1164
 
1163
1165
  ```ts
1164
1166
  import { google } from '@ai-sdk/google';
1165
- import { GoogleGenerativeAIImageProviderOptions } from '@ai-sdk/google';
1167
+ import { GoogleImageModelOptions } from '@ai-sdk/google';
1166
1168
  import { generateImage } from 'ai';
1167
1169
 
1168
1170
  const { image } = await generateImage({
@@ -1170,7 +1172,7 @@ const { image } = await generateImage({
1170
1172
  providerOptions: {
1171
1173
  google: {
1172
1174
  personGeneration: 'dont_allow',
1173
- } satisfies GoogleGenerativeAIImageProviderOptions,
1175
+ } satisfies GoogleImageModelOptions,
1174
1176
  },
1175
1177
  // ...
1176
1178
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/google",
3
- "version": "3.0.24",
3
+ "version": "3.0.25",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -16,7 +16,7 @@ import { z } from 'zod/v4';
16
16
  import { googleFailedResponseHandler } from './google-error';
17
17
  import {
18
18
  GoogleGenerativeAIEmbeddingModelId,
19
- googleGenerativeAIEmbeddingProviderOptions,
19
+ googleEmbeddingModelOptions,
20
20
  } from './google-generative-ai-embedding-options';
21
21
 
22
22
  type GoogleGenerativeAIEmbeddingConfig = {
@@ -57,7 +57,7 @@ export class GoogleGenerativeAIEmbeddingModel implements EmbeddingModelV3 {
57
57
  const googleOptions = await parseProviderOptions({
58
58
  provider: 'google',
59
59
  providerOptions,
60
- schema: googleGenerativeAIEmbeddingProviderOptions,
60
+ schema: googleEmbeddingModelOptions,
61
61
  });
62
62
 
63
63
  if (values.length > this.maxEmbeddingsPerCall) {
@@ -10,7 +10,7 @@ export type GoogleGenerativeAIEmbeddingModelId =
10
10
  | 'text-embedding-004'
11
11
  | (string & {});
12
12
 
13
- export const googleGenerativeAIEmbeddingProviderOptions = lazySchema(() =>
13
+ export const googleEmbeddingModelOptions = lazySchema(() =>
14
14
  zodSchema(
15
15
  z.object({
16
16
  /**
@@ -47,6 +47,6 @@ export const googleGenerativeAIEmbeddingProviderOptions = lazySchema(() =>
47
47
  ),
48
48
  );
49
49
 
50
- export type GoogleGenerativeAIEmbeddingProviderOptions = InferSchema<
51
- typeof googleGenerativeAIEmbeddingProviderOptions
50
+ export type GoogleEmbeddingModelOptions = InferSchema<
51
+ typeof googleEmbeddingModelOptions
52
52
  >;
@@ -99,7 +99,7 @@ export class GoogleGenerativeAIImageModel implements ImageModelV3 {
99
99
  const googleOptions = await parseProviderOptions({
100
100
  provider: 'google',
101
101
  providerOptions,
102
- schema: googleImageProviderOptionsSchema,
102
+ schema: googleImageModelOptionsSchema,
103
103
  });
104
104
 
105
105
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
@@ -168,7 +168,7 @@ const googleImageResponseSchema = lazySchema(() =>
168
168
 
169
169
  // Note: For the initial GA launch of Imagen 3, safety filters are not configurable.
170
170
  // https://ai.google.dev/gemini-api/docs/imagen#imagen-model
171
- const googleImageProviderOptionsSchema = lazySchema(() =>
171
+ const googleImageModelOptionsSchema = lazySchema(() =>
172
172
  zodSchema(
173
173
  z.object({
174
174
  personGeneration: z
@@ -179,6 +179,6 @@ const googleImageProviderOptionsSchema = lazySchema(() =>
179
179
  ),
180
180
  );
181
181
 
182
- export type GoogleGenerativeAIImageProviderOptions = InferSchema<
183
- typeof googleImageProviderOptionsSchema
182
+ export type GoogleImageModelOptions = InferSchema<
183
+ typeof googleImageModelOptionsSchema
184
184
  >;
@@ -36,7 +36,7 @@ import { getModelPath } from './get-model-path';
36
36
  import { googleFailedResponseHandler } from './google-error';
37
37
  import {
38
38
  GoogleGenerativeAIModelId,
39
- googleGenerativeAIProviderOptions,
39
+ googleLanguageModelOptions,
40
40
  } from './google-generative-ai-options';
41
41
  import { GoogleGenerativeAIContentPart } from './google-generative-ai-prompt';
42
42
  import { prepareTools } from './google-prepare-tools';
@@ -103,14 +103,14 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
103
103
  let googleOptions = await parseProviderOptions({
104
104
  provider: providerOptionsName,
105
105
  providerOptions,
106
- schema: googleGenerativeAIProviderOptions,
106
+ schema: googleLanguageModelOptions,
107
107
  });
108
108
 
109
109
  if (googleOptions == null && providerOptionsName !== 'google') {
110
110
  googleOptions = await parseProviderOptions({
111
111
  provider: 'google',
112
112
  providerOptions,
113
- schema: googleGenerativeAIProviderOptions,
113
+ schema: googleLanguageModelOptions,
114
114
  });
115
115
  }
116
116
 
@@ -45,7 +45,7 @@ export type GoogleGenerativeAIModelId =
45
45
  | 'gemma-3-27b-it'
46
46
  | (string & {});
47
47
 
48
- export const googleGenerativeAIProviderOptions = lazySchema(() =>
48
+ export const googleLanguageModelOptions = lazySchema(() =>
49
49
  zodSchema(
50
50
  z.object({
51
51
  responseModalities: z.array(z.enum(['TEXT', 'IMAGE'])).optional(),
@@ -188,6 +188,6 @@ export const googleGenerativeAIProviderOptions = lazySchema(() =>
188
188
  ),
189
189
  );
190
190
 
191
- export type GoogleGenerativeAIProviderOptions = InferSchema<
192
- typeof googleGenerativeAIProviderOptions
191
+ export type GoogleLanguageModelOptions = InferSchema<
192
+ typeof googleLanguageModelOptions
193
193
  >;
@@ -21,7 +21,7 @@ import { z } from 'zod/v4';
21
21
  import { googleFailedResponseHandler } from './google-error';
22
22
  import type { GoogleGenerativeAIVideoModelId } from './google-generative-ai-video-settings';
23
23
 
24
- export type GoogleGenerativeAIVideoProviderOptions = {
24
+ export type GoogleVideoModelOptions = {
25
25
  // Polling configuration
26
26
  pollIntervalMs?: number | null;
27
27
  pollTimeoutMs?: number | null;
@@ -76,8 +76,8 @@ export class GoogleGenerativeAIVideoModel implements Experimental_VideoModelV3 {
76
76
  const googleOptions = (await parseProviderOptions({
77
77
  provider: 'google',
78
78
  providerOptions: options.providerOptions,
79
- schema: googleVideoProviderOptionsSchema,
80
- })) as GoogleGenerativeAIVideoProviderOptions | undefined;
79
+ schema: googleVideoModelOptionsSchema,
80
+ })) as GoogleVideoModelOptions | undefined;
81
81
 
82
82
  const instances: Array<Record<string, unknown>> = [{}];
83
83
  const instance = instances[0];
@@ -155,7 +155,7 @@ export class GoogleGenerativeAIVideoModel implements Experimental_VideoModelV3 {
155
155
  }
156
156
 
157
157
  if (googleOptions != null) {
158
- const opts = googleOptions as GoogleGenerativeAIVideoProviderOptions;
158
+ const opts = googleOptions as GoogleVideoModelOptions;
159
159
 
160
160
  if (
161
161
  opts.personGeneration !== undefined &&
@@ -350,7 +350,7 @@ const googleOperationSchema = z.object({
350
350
  .nullish(),
351
351
  });
352
352
 
353
- const googleVideoProviderOptionsSchema = lazySchema(() =>
353
+ const googleVideoModelOptionsSchema = lazySchema(() =>
354
354
  zodSchema(
355
355
  z
356
356
  .object({
package/src/index.ts CHANGED
@@ -1,9 +1,25 @@
1
1
  export type { GoogleErrorData } from './google-error';
2
- export type { GoogleGenerativeAIProviderOptions } from './google-generative-ai-options';
2
+ export type {
3
+ GoogleLanguageModelOptions,
4
+ /** @deprecated Use `GoogleLanguageModelOptions` instead. */
5
+ GoogleLanguageModelOptions as GoogleGenerativeAIProviderOptions,
6
+ } from './google-generative-ai-options';
3
7
  export type { GoogleGenerativeAIProviderMetadata } from './google-generative-ai-prompt';
4
- export type { GoogleGenerativeAIImageProviderOptions } from './google-generative-ai-image-model';
5
- export type { GoogleGenerativeAIEmbeddingProviderOptions } from './google-generative-ai-embedding-options';
6
- export type { GoogleGenerativeAIVideoProviderOptions } from './google-generative-ai-video-model';
8
+ export type {
9
+ GoogleImageModelOptions,
10
+ /** @deprecated Use `GoogleImageModelOptions` instead. */
11
+ GoogleImageModelOptions as GoogleGenerativeAIImageProviderOptions,
12
+ } from './google-generative-ai-image-model';
13
+ export type {
14
+ GoogleEmbeddingModelOptions,
15
+ /** @deprecated Use `GoogleEmbeddingModelOptions` instead. */
16
+ GoogleEmbeddingModelOptions as GoogleGenerativeAIEmbeddingProviderOptions,
17
+ } from './google-generative-ai-embedding-options';
18
+ export type {
19
+ GoogleVideoModelOptions,
20
+ /** @deprecated Use `GoogleVideoModelOptions` instead. */
21
+ GoogleVideoModelOptions as GoogleGenerativeAIVideoProviderOptions,
22
+ } from './google-generative-ai-video-model';
7
23
  export type { GoogleGenerativeAIVideoModelId } from './google-generative-ai-video-settings';
8
24
  export { createGoogleGenerativeAI, google } from './google-provider';
9
25
  export type {