@ai-sdk/google-vertex 5.0.0-beta.61 → 5.0.0-beta.63

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (45)
  1. package/CHANGELOG.md +33 -0
  2. package/dist/anthropic/edge/index.d.ts +14 -9
  3. package/dist/anthropic/edge/index.js +15 -13
  4. package/dist/anthropic/edge/index.js.map +1 -1
  5. package/dist/anthropic/index.d.ts +14 -9
  6. package/dist/anthropic/index.js +14 -12
  7. package/dist/anthropic/index.js.map +1 -1
  8. package/dist/edge/index.d.ts +3 -3
  9. package/dist/edge/index.js +197 -157
  10. package/dist/edge/index.js.map +1 -1
  11. package/dist/index.d.ts +7 -7
  12. package/dist/index.js +199 -159
  13. package/dist/index.js.map +1 -1
  14. package/dist/maas/edge/index.d.ts +3 -3
  15. package/dist/maas/edge/index.js +12 -8
  16. package/dist/maas/edge/index.js.map +1 -1
  17. package/dist/maas/index.d.ts +3 -3
  18. package/dist/maas/index.js +11 -7
  19. package/dist/maas/index.js.map +1 -1
  20. package/docs/16-google-vertex.mdx +77 -77
  21. package/package.json +7 -7
  22. package/src/anthropic/edge/google-vertex-anthropic-provider-edge.ts +16 -11
  23. package/src/anthropic/edge/index.ts +6 -2
  24. package/src/anthropic/google-vertex-anthropic-provider-node.ts +16 -11
  25. package/src/anthropic/google-vertex-anthropic-provider.ts +10 -10
  26. package/src/anthropic/index.ts +6 -2
  27. package/src/edge/google-vertex-provider-edge.ts +9 -10
  28. package/src/edge/index.ts +8 -1
  29. package/src/google-vertex-auth-google-auth-library.ts +1 -2
  30. package/src/google-vertex-config.ts +1 -1
  31. package/src/google-vertex-embedding-model.ts +13 -5
  32. package/src/google-vertex-image-model-options.ts +74 -0
  33. package/src/google-vertex-image-model.ts +66 -113
  34. package/src/google-vertex-provider-base.ts +245 -0
  35. package/src/google-vertex-provider.ts +35 -233
  36. package/src/google-vertex-video-model-options.ts +49 -0
  37. package/src/google-vertex-video-model.ts +32 -68
  38. package/src/index.ts +12 -5
  39. package/src/maas/edge/google-vertex-maas-provider-edge.ts +7 -8
  40. package/src/maas/edge/index.ts +6 -2
  41. package/src/maas/google-vertex-maas-provider-node.ts +7 -8
  42. package/src/maas/google-vertex-maas-provider.ts +7 -5
  43. package/src/maas/index.ts +6 -2
  44. package/src/google-vertex-provider-node.ts +0 -48
  45. package/src/{google-vertex-embedding-options.ts → google-vertex-embedding-model-options.ts} +0 -0
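The common thread across these files is a rename of the provider identity from `vertex` to `googleVertex`, with the old names kept as deprecated aliases and the old provider-options and metadata namespaces still honored. A minimal before/after sketch of consumer code (the model id is illustrative):

```ts
// Before this release range (still compiles, now flagged @deprecated):
// import { createVertex } from '@ai-sdk/google-vertex';

// From beta.63 on, the renamed factory is the primary export:
import { createGoogleVertex } from '@ai-sdk/google-vertex';

const googleVertex = createGoogleVertex();
const model = googleVertex('gemini-2.0-flash'); // illustrative model id
```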
package/src/anthropic/google-vertex-anthropic-provider.ts CHANGED
@@ -1,25 +1,25 @@
 import {
-  LanguageModelV4,
   NoSuchModelError,
-  ProviderV4,
+  type LanguageModelV4,
+  type ProviderV4,
 } from '@ai-sdk/provider';
 import {
-  FetchFunction,
-  Resolvable,
   loadOptionalSetting,
   withoutTrailingSlash,
+  type FetchFunction,
+  type Resolvable,
 } from '@ai-sdk/provider-utils';
 import {
   anthropicTools,
   AnthropicLanguageModel,
 } from '@ai-sdk/anthropic/internal';
-import { GoogleVertexAnthropicModelId } from './google-vertex-anthropic-options';
+import type { GoogleVertexAnthropicModelId } from './google-vertex-anthropic-options';
 
 /**
  * Tools supported by Google Vertex Anthropic.
  * This is a subset of the full Anthropic tools - only these are recognized by the Vertex API.
  */
-export const vertexAnthropicTools = {
+export const googleVertexAnthropicTools = {
   /**
    * The bash tool enables Claude to execute shell commands in a persistent bash session,
    * allowing system operations, script execution, and command-line automation.
@@ -120,7 +120,7 @@ export interface GoogleVertexAnthropicProvider extends ProviderV4 {
   * computer_20241022, webSearch_20250305, toolSearchRegex_20251119,
   * toolSearchBm25_20251119
   */
-  tools: typeof vertexAnthropicTools;
+  tools: typeof googleVertexAnthropicTools;
 
   /**
   * @deprecated Use `embeddingModel` instead.
@@ -160,7 +160,7 @@ export interface GoogleVertexAnthropicProviderSettings {
 /**
  * Create a Google Vertex Anthropic provider instance.
  */
-export function createVertexAnthropic(
+export function createGoogleVertexAnthropic(
   options: GoogleVertexAnthropicProviderSettings = {},
 ): GoogleVertexAnthropicProvider {
   const getBaseURL = () => {
@@ -181,7 +181,7 @@ export function createVertexAnthropic(
 
   const createChatModel = (modelId: GoogleVertexAnthropicModelId) =>
     new AnthropicLanguageModel(modelId, {
-      provider: 'vertex.anthropic.messages',
+      provider: 'googleVertex.anthropic.messages',
       baseURL: getBaseURL(),
       headers: options.headers ?? {},
       fetch: options.fetch,
@@ -229,7 +229,7 @@ export function createVertexAnthropic(
     throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
   };
 
-  provider.tools = vertexAnthropicTools;
+  provider.tools = googleVertexAnthropicTools;
 
   return provider;
 }
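Because the internal provider string changes from `vertex.anthropic.messages` to `googleVertex.anthropic.messages`, any code that branches on `model.provider` (telemetry, routing) sees a new value. A hedged sketch, assuming the `/anthropic` subpath export and an illustrative model id:

```ts
import { createGoogleVertexAnthropic } from '@ai-sdk/google-vertex/anthropic';

const anthropic = createGoogleVertexAnthropic();
const model = anthropic('claude-sonnet-4'); // illustrative model id

// Accept both the new and the pre-beta.63 provider id:
const isVertexAnthropic = [
  'googleVertex.anthropic.messages',
  'vertex.anthropic.messages',
].includes(model.provider);
```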
package/src/anthropic/index.ts CHANGED
@@ -1,6 +1,10 @@
 export {
-  vertexAnthropic,
-  createVertexAnthropic,
+  googleVertexAnthropic,
+  /** @deprecated Use `googleVertexAnthropic` instead. */
+  googleVertexAnthropic as vertexAnthropic,
+  createGoogleVertexAnthropic,
+  /** @deprecated Use `createGoogleVertexAnthropic` instead. */
+  createGoogleVertexAnthropic as createVertexAnthropic,
 } from './google-vertex-anthropic-provider-node';
 export type {
   GoogleVertexAnthropicProvider,
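The export-specifier aliasing above is what keeps old imports compiling: both names bind to the same function, and the JSDoc tag on the specifier is what editors use to strike through deprecated call sites. The pattern in isolation, with hypothetical module names:

```ts
// provider.ts (hypothetical module)
export function createGoogleVertexAnthropic() {
  /* ... */
}

// index.ts: one binding, two names; only the alias carries @deprecated.
export {
  createGoogleVertexAnthropic,
  /** @deprecated Use `createGoogleVertexAnthropic` instead. */
  createGoogleVertexAnthropic as createVertexAnthropic,
} from './provider';
```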
package/src/edge/google-vertex-provider-edge.ts CHANGED
@@ -1,14 +1,13 @@
 import { loadOptionalSetting, resolve } from '@ai-sdk/provider-utils';
 import {
-  createVertex as createVertexOriginal,
-  GoogleVertexProvider,
-  GoogleVertexProviderSettings as GoogleVertexProviderSettingsOriginal,
-} from '../google-vertex-provider';
+  createGoogleVertex as createGoogleVertexOriginal,
+  type GoogleVertexProvider,
+  type GoogleVertexProviderSettings as GoogleVertexProviderSettingsOriginal,
+} from '../google-vertex-provider-base';
 import {
   generateAuthToken,
-  GoogleCredentials,
+  type GoogleCredentials,
 } from './google-vertex-auth-edge';
-
 export type { GoogleVertexProvider };
 
 export interface GoogleVertexProviderSettings extends GoogleVertexProviderSettingsOriginal {
@@ -20,7 +19,7 @@ export interface GoogleVertexProviderSettings extends GoogleVertexProviderSettin
   googleCredentials?: GoogleCredentials;
 }
 
-export function createVertex(
+export function createGoogleVertex(
   options: GoogleVertexProviderSettings = {},
 ): GoogleVertexProvider {
   const apiKey = loadOptionalSetting({
@@ -29,10 +28,10 @@ export function createVertex(
   });
 
   if (apiKey) {
-    return createVertexOriginal(options);
+    return createGoogleVertexOriginal(options);
   }
 
-  return createVertexOriginal({
+  return createGoogleVertexOriginal({
     ...options,
     headers: async () => ({
       Authorization: `Bearer ${await generateAuthToken(
@@ -46,4 +45,4 @@ export function createVertex(
 /**
  * Default Google Vertex AI provider instance.
  */
-export const vertex = createVertex();
+export const googleVertex = createGoogleVertex();
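In the edge entry point, `createGoogleVertex` first tries to resolve an API key setting; if none is found, every request is wrapped with a bearer token minted by `generateAuthToken` from the supplied credentials. A usage sketch; the field names inside `googleCredentials` are assumptions based on typical service-account JSON (the authoritative shape is `GoogleCredentials` in `google-vertex-auth-edge.ts`):

```ts
import { createGoogleVertex } from '@ai-sdk/google-vertex/edge';

// Token-based auth path (no API key resolvable from the environment):
const googleVertex = createGoogleVertex({
  googleCredentials: {
    // assumed field names; check GoogleCredentials for the real shape
    clientEmail: 'svc@my-project.iam.gserviceaccount.com',
    privateKey: '-----BEGIN PRIVATE KEY-----\n...',
  },
});
```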
package/src/edge/index.ts CHANGED
@@ -1,4 +1,11 @@
-export { createVertex, vertex } from './google-vertex-provider-edge';
+export {
+  createGoogleVertex,
+  /** @deprecated Use `createGoogleVertex` instead. */
+  createGoogleVertex as createVertex,
+  googleVertex,
+  /** @deprecated Use `googleVertex` instead. */
+  googleVertex as vertex,
+} from './google-vertex-provider-edge';
 export type {
   GoogleVertexProviderSettings,
   GoogleVertexProvider,
package/src/google-vertex-auth-google-auth-library.ts CHANGED
@@ -1,5 +1,4 @@
-import { GoogleAuth, GoogleAuthOptions } from 'google-auth-library';
-
+import { GoogleAuth, type GoogleAuthOptions } from 'google-auth-library';
 let authInstance: GoogleAuth | null = null;
 let authOptions: GoogleAuthOptions | null = null;
 
package/src/google-vertex-config.ts CHANGED
@@ -1,4 +1,4 @@
-import { FetchFunction, Resolvable } from '@ai-sdk/provider-utils';
+import type { FetchFunction, Resolvable } from '@ai-sdk/provider-utils';
 
 export interface GoogleVertexConfig {
   provider: string;
package/src/google-vertex-embedding-model.ts CHANGED
@@ -1,6 +1,6 @@
 import {
-  EmbeddingModelV4,
   TooManyEmbeddingValuesForCallError,
+  type EmbeddingModelV4,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -15,10 +15,10 @@ import {
 import { z } from 'zod/v4';
 import { googleVertexFailedResponseHandler } from './google-vertex-error';
 import {
-  GoogleVertexEmbeddingModelId,
   googleVertexEmbeddingModelOptions,
-} from './google-vertex-embedding-options';
-import { GoogleVertexConfig } from './google-vertex-config';
+  type GoogleVertexEmbeddingModelId,
+} from './google-vertex-embedding-model-options';
+import type { GoogleVertexConfig } from './google-vertex-config';
 
 export class GoogleVertexEmbeddingModel implements EmbeddingModelV4 {
   readonly specificationVersion = 'v4';
@@ -63,11 +63,19 @@ export class GoogleVertexEmbeddingModel implements EmbeddingModelV4 {
     Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
   > {
     let googleOptions = await parseProviderOptions({
-      provider: 'vertex',
+      provider: 'googleVertex',
       providerOptions,
       schema: googleVertexEmbeddingModelOptions,
     });
 
+    if (googleOptions == null) {
+      googleOptions = await parseProviderOptions({
+        provider: 'vertex',
+        providerOptions,
+        schema: googleVertexEmbeddingModelOptions,
+      });
+    }
+
     if (googleOptions == null) {
       googleOptions = await parseProviderOptions({
         provider: 'google',
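`doEmbed` now resolves provider options from three namespaces in order: `googleVertex` first, then the legacy `vertex`, then `google`, so existing callers keep working while new code uses the new key. A consumer-side sketch (the model id and option name are illustrative of the Google embedding options):

```ts
import { embed } from 'ai';
import { createGoogleVertex } from '@ai-sdk/google-vertex';

const googleVertex = createGoogleVertex();

await embed({
  model: googleVertex.embeddingModel('text-embedding-005'), // illustrative id
  value: 'sunny day at the beach',
  providerOptions: {
    // preferred namespace as of beta.63; `vertex` and `google` still parse
    googleVertex: { outputDimensionality: 768 },
  },
});
```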
package/src/google-vertex-image-model-options.ts ADDED
@@ -0,0 +1,74 @@
+import { z } from 'zod/v4';
+
+export const googleVertexImageModelOptionsSchema = z.object({
+  negativePrompt: z.string().nullish(),
+  personGeneration: z
+    .enum(['dont_allow', 'allow_adult', 'allow_all'])
+    .nullish(),
+  safetySetting: z
+    .enum([
+      'block_low_and_above',
+      'block_medium_and_above',
+      'block_only_high',
+      'block_none',
+    ])
+    .nullish(),
+  addWatermark: z.boolean().nullish(),
+  storageUri: z.string().nullish(),
+  sampleImageSize: z.enum(['1K', '2K']).nullish(),
+  /**
+   * Configuration for image editing operations
+   */
+  edit: z
+    .object({
+      /**
+       * An integer that represents the number of sampling steps.
+       * A higher value offers better image quality, a lower value offers better latency.
+       * Try 35 steps to start. If the quality doesn't meet your requirements,
+       * increase the value towards an upper limit of 75.
+       */
+      baseSteps: z.number().nullish(),
+
+      // Edit mode options
+      // https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects
+      mode: z
+        .enum([
+          'EDIT_MODE_INPAINT_INSERTION',
+          'EDIT_MODE_INPAINT_REMOVAL',
+          'EDIT_MODE_OUTPAINT',
+          'EDIT_MODE_CONTROLLED_EDITING',
+          'EDIT_MODE_PRODUCT_IMAGE',
+          'EDIT_MODE_BGSWAP',
+        ])
+        .nullish(),
+
+      /**
+       * The mask mode to use.
+       * - `MASK_MODE_DEFAULT` - Default value for mask mode.
+       * - `MASK_MODE_USER_PROVIDED` - User provided mask. No segmentation needed.
+       * - `MASK_MODE_DETECTION_BOX` - Mask from detected bounding boxes.
+       * - `MASK_MODE_CLOTHING_AREA` - Masks from segmenting the clothing area with open-vocab segmentation.
+       * - `MASK_MODE_PARSED_PERSON` - Masks from segmenting the person body and clothing using the person-parsing model.
+       */
+      maskMode: z
+        .enum([
+          'MASK_MODE_DEFAULT',
+          'MASK_MODE_USER_PROVIDED',
+          'MASK_MODE_DETECTION_BOX',
+          'MASK_MODE_CLOTHING_AREA',
+          'MASK_MODE_PARSED_PERSON',
+        ])
+        .nullish(),
+
+      /**
+       * Optional. A float value between 0 and 1, inclusive, that represents the
+       * percentage of the image width to grow the mask by. Using dilation helps
+       * compensate for imprecise masks. We recommend a value of 0.01.
+       */
+      maskDilation: z.number().nullish(),
+    })
+    .nullish(),
+});
+export type GoogleVertexImageModelOptions = z.infer<
+  typeof googleVertexImageModelOptionsSchema
+>;
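The Imagen options schema now lives in its own module; the behavior is unchanged, only the file moved (see the matching removal at the bottom of `google-vertex-image-model.ts` below). A sketch of passing these options at call time, assuming the `ai` package's experimental image helper:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { createGoogleVertex } from '@ai-sdk/google-vertex';

const googleVertex = createGoogleVertex();

await generateImage({
  model: googleVertex.imageModel('imagen-3.0-generate-002'), // illustrative id
  prompt: 'a watercolor lighthouse at dusk',
  providerOptions: {
    googleVertex: {
      negativePrompt: 'people',
      addWatermark: false,
      sampleImageSize: '2K',
    },
  },
});
```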
package/src/google-vertex-image-model.ts CHANGED
@@ -1,13 +1,12 @@
 import type { GoogleLanguageModelOptions } from '@ai-sdk/google';
 import { GoogleLanguageModel } from '@ai-sdk/google/internal';
-import {
+import type {
   ImageModelV4,
   ImageModelV4File,
   LanguageModelV4Prompt,
   SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
-  Resolvable,
   combineHeaders,
   convertToBase64,
   convertUint8ArrayToBase64,
@@ -19,10 +18,12 @@ import {
   serializeModelOptions,
   WORKFLOW_SERIALIZE,
   WORKFLOW_DESERIALIZE,
+  type Resolvable,
 } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 import { googleVertexFailedResponseHandler } from './google-vertex-error';
-import { GoogleVertexImageModelId } from './google-vertex-image-settings';
+import { googleVertexImageModelOptionsSchema } from './google-vertex-image-model-options';
+import type { GoogleVertexImageModelId } from './google-vertex-image-settings';
 
 interface GoogleVertexImageModelConfig {
   provider: string;
@@ -103,14 +104,20 @@ export class GoogleVertexImageModel implements ImageModelV4 {
       });
     }
 
-    const vertexImageOptions = await parseProviderOptions({
-      provider: 'vertex',
-      providerOptions,
-      schema: googleVertexImageModelOptionsSchema,
-    });
+    const googleVertexImageOptions =
+      (await parseProviderOptions({
+        provider: 'googleVertex',
+        providerOptions,
+        schema: googleVertexImageModelOptionsSchema,
+      })) ??
+      (await parseProviderOptions({
+        provider: 'vertex',
+        providerOptions,
+        schema: googleVertexImageModelOptionsSchema,
+      }));
 
     // Extract edit-specific options from provider options
-    const { edit, ...otherOptions } = vertexImageOptions ?? {};
+    const { edit, ...otherOptions } = googleVertexImageOptions ?? {};
     const { mode: editMode, baseSteps, maskMode, maskDilation } = edit ?? {};
 
     // Build the request body based on whether we're editing or generating
@@ -188,7 +195,7 @@ export class GoogleVertexImageModel implements ImageModelV4 {
       body,
       failedResponseHandler: googleVertexFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
-        vertexImageResponseSchema,
+        googleVertexImageResponseSchema,
       ),
       abortSignal,
       fetch: this.config.fetch,
@@ -205,8 +212,8 @@ export class GoogleVertexImageModel implements ImageModelV4 {
         modelId: this.modelId,
         headers: responseHeaders,
       },
-      providerMetadata: {
-        vertex: {
+      providerMetadata: (() => {
+        const payload = {
           images:
             response.predictions?.map(prediction => {
               const {
@@ -216,8 +223,9 @@ export class GoogleVertexImageModel implements ImageModelV4 {
 
               return { ...(revisedPrompt != null && { revisedPrompt }) };
             }) ?? [],
-        },
-      },
+        };
+        return { googleVertex: payload, vertex: payload };
+      })(),
     };
   }
 
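Image metadata is now published under both keys, with `googleVertex` and `vertex` pointing at the same payload, so existing readers of either namespace keep working. A small consumer-side sketch of the read-side counterpart:

```ts
// Prefer the new namespace; fall back to the legacy key for older versions.
type ProviderMetadata = Record<string, unknown> | undefined;

function googleVertexImageMetadata(providerMetadata: ProviderMetadata) {
  return providerMetadata?.['googleVertex'] ?? providerMetadata?.['vertex'];
}

// usage: const meta = googleVertexImageMetadata(result.providerMetadata);
```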
@@ -260,7 +268,13 @@
 
     const userContent: Array<
       | { type: 'text'; text: string }
-      | { type: 'file'; data: string | Uint8Array | URL; mediaType: string }
+      | {
+          type: 'file';
+          data:
+            | { type: 'data'; data: string | Uint8Array }
+            | { type: 'url'; url: URL };
+          mediaType: string;
+        }
     > = [];
 
     if (prompt != null) {
@@ -272,16 +286,19 @@
       if (file.type === 'url') {
         userContent.push({
           type: 'file',
-          data: new URL(file.url),
+          data: { type: 'url', url: new URL(file.url) },
           mediaType: 'image/*',
         });
       } else {
         userContent.push({
           type: 'file',
-          data:
-            typeof file.data === 'string'
-              ? file.data
-              : new Uint8Array(file.data),
+          data: {
+            type: 'data',
+            data:
+              typeof file.data === 'string'
+                ? file.data
+                : new Uint8Array(file.data),
+          },
           mediaType: file.mediaType,
         });
       }
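File parts now carry a discriminated union instead of the old `string | Uint8Array | URL`, so downstream code branches on `data.type` rather than sniffing the value. A self-contained sketch of the new shape and a type-safe branch:

```ts
// The new file-part data shape, as used in the prompt construction above.
type FileData =
  | { type: 'data'; data: string | Uint8Array }
  | { type: 'url'; url: URL };

function describeFileData(data: FileData): string {
  switch (data.type) {
    case 'url':
      return `remote file at ${data.url.href}`;
    case 'data':
      return typeof data.data === 'string'
        ? 'inline base64 string'
        : `inline binary (${data.data.length} bytes)`;
  }
}
```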
@@ -303,24 +320,27 @@
       }),
     });
 
+    const userVertexOptions = (providerOptions?.googleVertex ??
+      providerOptions?.vertex) as
+      | Omit<GoogleLanguageModelOptions, 'responseModalities' | 'imageConfig'>
+      | undefined;
+    const innerVertexOptions: GoogleLanguageModelOptions = {
+      responseModalities: ['IMAGE'],
+      imageConfig: aspectRatio
+        ? {
+            aspectRatio: aspectRatio as NonNullable<
+              GoogleLanguageModelOptions['imageConfig']
+            >['aspectRatio'],
+          }
+        : undefined,
+      ...(userVertexOptions ?? {}),
+    };
     const result = await languageModel.doGenerate({
       prompt: languageModelPrompt,
       seed,
       providerOptions: {
-        vertex: {
-          responseModalities: ['IMAGE'],
-          imageConfig: aspectRatio
-            ? {
-                aspectRatio: aspectRatio as NonNullable<
-                  GoogleLanguageModelOptions['imageConfig']
-                >['aspectRatio'],
-              }
-            : undefined,
-          ...((providerOptions?.vertex as Omit<
-            GoogleLanguageModelOptions,
-            'responseModalities' | 'imageConfig'
-          >) ?? {}),
-        } satisfies GoogleLanguageModelOptions,
+        googleVertex: innerVertexOptions,
+        vertex: innerVertexOptions,
       },
       headers,
       abortSignal,
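For Gemini-backed image models, the options object is now built once and sent under both namespaces; because user options are spread last, a caller-supplied value overrides the defaults derived from the call (such as `imageConfig` from `aspectRatio`). A merge-order sketch in plain TypeScript:

```ts
// Spread order decides precedence: later properties win.
const defaults = {
  responseModalities: ['IMAGE'],
  imageConfig: { aspectRatio: '16:9' }, // derived from the call's aspectRatio
};
const userOptions = { imageConfig: { aspectRatio: '1:1' } };

const merged = { ...defaults, ...userOptions };
console.log(merged.imageConfig); // { aspectRatio: '1:1' } (user value wins)
```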
@@ -330,18 +350,24 @@
 
     const images: string[] = [];
     for (const part of result.content) {
-      if (part.type === 'file' && part.mediaType.startsWith('image/')) {
-        images.push(convertToBase64(part.data));
+      if (
+        part.type === 'file' &&
+        part.mediaType.startsWith('image/') &&
+        part.data.type === 'data'
+      ) {
+        images.push(convertToBase64(part.data.data));
       }
     }
 
+    const geminiPayload = {
+      images: images.map(() => ({})),
+    };
     return {
       images,
       warnings,
       providerMetadata: {
-        vertex: {
-          images: images.map(() => ({})),
-        },
+        googleVertex: geminiPayload,
+        vertex: geminiPayload,
       },
       response: {
         timestamp: currentDate,
@@ -367,7 +393,7 @@ function isGeminiModel(modelId: string): boolean {
 
 // minimal version of the schema, focussed on what is needed for the implementation
 // this approach limits breakages when the API changes and increases efficiency
-const vertexImageResponseSchema = z.object({
+const googleVertexImageResponseSchema = z.object({
   predictions: z
     .array(
       z.object({
@@ -379,79 +405,6 @@ const vertexImageResponseSchema = z.object({
         .nullish(),
 });
 
-const googleVertexImageModelOptionsSchema = z.object({
-  negativePrompt: z.string().nullish(),
-  personGeneration: z
-    .enum(['dont_allow', 'allow_adult', 'allow_all'])
-    .nullish(),
-  safetySetting: z
-    .enum([
-      'block_low_and_above',
-      'block_medium_and_above',
-      'block_only_high',
-      'block_none',
-    ])
-    .nullish(),
-  addWatermark: z.boolean().nullish(),
-  storageUri: z.string().nullish(),
-  sampleImageSize: z.enum(['1K', '2K']).nullish(),
-  /**
-   * Configuration for image editing operations
-   */
-  edit: z
-    .object({
-      /**
-       * An integer that represents the number of sampling steps.
-       * A higher value offers better image quality, a lower value offers better latency.
-       * Try 35 steps to start. If the quality doesn't meet your requirements,
-       * increase the value towards an upper limit of 75.
-       */
-      baseSteps: z.number().nullish(),
-
-      // Edit mode options
-      // https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects
-      mode: z
-        .enum([
-          'EDIT_MODE_INPAINT_INSERTION',
-          'EDIT_MODE_INPAINT_REMOVAL',
-          'EDIT_MODE_OUTPAINT',
-          'EDIT_MODE_CONTROLLED_EDITING',
-          'EDIT_MODE_PRODUCT_IMAGE',
-          'EDIT_MODE_BGSWAP',
-        ])
-        .nullish(),
-
-      /**
-       * The mask mode to use.
-       * - `MASK_MODE_DEFAULT` - Default value for mask mode.
-       * - `MASK_MODE_USER_PROVIDED` - User provided mask. No segmentation needed.
-       * - `MASK_MODE_DETECTION_BOX` - Mask from detected bounding boxes.
-       * - `MASK_MODE_CLOTHING_AREA` - Masks from segmenting the clothing area with open-vocab segmentation.
-       * - `MASK_MODE_PARSED_PERSON` - Masks from segmenting the person body and clothing using the person-parsing model.
-       */
-      maskMode: z
-        .enum([
-          'MASK_MODE_DEFAULT',
-          'MASK_MODE_USER_PROVIDED',
-          'MASK_MODE_DETECTION_BOX',
-          'MASK_MODE_CLOTHING_AREA',
-          'MASK_MODE_PARSED_PERSON',
-        ])
-        .nullish(),
-
-      /**
-       * Optional. A float value between 0 and 1, inclusive, that represents the
-       * percentage of the image width to grow the mask by. Using dilation helps
-       * compensate for imprecise masks. We recommend a value of 0.01.
-       */
-      maskDilation: z.number().nullish(),
-    })
-    .nullish(),
-});
-export type GoogleVertexImageModelOptions = z.infer<
-  typeof googleVertexImageModelOptionsSchema
->;
-
 /**
  * Helper to convert ImageModelV4File data to base64 string
  */
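Taken together, the image-model changes follow one compatibility idiom: write metadata under both the new and the old key on the way out, and read options from the new key before the old on the way in. Distilled as a pair of hypothetical helpers:

```ts
// Write-side: publish one payload under both namespaces.
function writeBoth<T>(payload: T): { googleVertex: T; vertex: T } {
  return { googleVertex: payload, vertex: payload };
}

// Read-side: prefer the new namespace, fall back to the legacy one.
function readEither<T>(source: { googleVertex?: T; vertex?: T }): T | undefined {
  return source.googleVertex ?? source.vertex;
}
```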