@ai-sdk/google-vertex 5.0.0-beta.62 → 5.0.0-beta.63

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43):
  1. package/CHANGELOG.md +18 -0
  2. package/dist/anthropic/edge/index.d.ts +10 -5
  3. package/dist/anthropic/edge/index.js +15 -13
  4. package/dist/anthropic/edge/index.js.map +1 -1
  5. package/dist/anthropic/index.d.ts +10 -5
  6. package/dist/anthropic/index.js +14 -12
  7. package/dist/anthropic/index.js.map +1 -1
  8. package/dist/edge/index.d.ts +3 -3
  9. package/dist/edge/index.js +190 -153
  10. package/dist/edge/index.js.map +1 -1
  11. package/dist/index.d.ts +7 -7
  12. package/dist/index.js +192 -155
  13. package/dist/index.js.map +1 -1
  14. package/dist/maas/edge/index.d.ts +3 -3
  15. package/dist/maas/edge/index.js +9 -7
  16. package/dist/maas/edge/index.js.map +1 -1
  17. package/dist/maas/index.d.ts +3 -3
  18. package/dist/maas/index.js +8 -6
  19. package/dist/maas/index.js.map +1 -1
  20. package/docs/16-google-vertex.mdx +75 -75
  21. package/package.json +5 -5
  22. package/src/anthropic/edge/google-vertex-anthropic-provider-edge.ts +13 -7
  23. package/src/anthropic/edge/index.ts +6 -2
  24. package/src/anthropic/google-vertex-anthropic-provider-node.ts +13 -7
  25. package/src/anthropic/google-vertex-anthropic-provider.ts +5 -5
  26. package/src/anthropic/index.ts +6 -2
  27. package/src/edge/google-vertex-provider-edge.ts +6 -6
  28. package/src/edge/index.ts +8 -1
  29. package/src/google-vertex-embedding-model.ts +10 -2
  30. package/src/google-vertex-image-model-options.ts +74 -0
  31. package/src/google-vertex-image-model.ts +42 -102
  32. package/src/google-vertex-provider-base.ts +245 -0
  33. package/src/google-vertex-provider.ts +35 -233
  34. package/src/google-vertex-video-model-options.ts +49 -0
  35. package/src/google-vertex-video-model.ts +30 -66
  36. package/src/index.ts +12 -5
  37. package/src/maas/edge/google-vertex-maas-provider-edge.ts +3 -3
  38. package/src/maas/edge/index.ts +6 -2
  39. package/src/maas/google-vertex-maas-provider-node.ts +3 -3
  40. package/src/maas/google-vertex-maas-provider.ts +1 -1
  41. package/src/maas/index.ts +6 -2
  42. package/src/google-vertex-provider-node.ts +0 -47
  43. /package/src/{google-vertex-embedding-options.ts → google-vertex-embedding-model-options.ts} +0 -0
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
1
- // src/google-vertex-provider-node.ts
1
+ // src/google-vertex-provider.ts
2
2
  import { loadOptionalSetting as loadOptionalSetting2, resolve as resolve5 } from "@ai-sdk/provider-utils";
3
3
 
4
4
  // src/google-vertex-auth-google-auth-library.ts
@@ -22,7 +22,7 @@ async function generateAuthToken(options) {
22
22
  return (token == null ? void 0 : token.token) || null;
23
23
  }
24
24
 
25
- // src/google-vertex-provider.ts
25
+ // src/google-vertex-provider-base.ts
26
26
  import { GoogleLanguageModel as GoogleLanguageModel2 } from "@ai-sdk/google/internal";
27
27
  import {
28
28
  generateId,
@@ -35,7 +35,7 @@ import {
35
35
  } from "@ai-sdk/provider-utils";
36
36
 
37
37
  // src/version.ts
38
- var VERSION = true ? "5.0.0-beta.62" : "0.0.0-test";
38
+ var VERSION = true ? "5.0.0-beta.63" : "0.0.0-test";
39
39
 
40
40
  // src/google-vertex-embedding-model.ts
41
41
  import {
@@ -70,7 +70,7 @@ var googleVertexFailedResponseHandler = createJsonErrorResponseHandler(
70
70
  }
71
71
  );
72
72
 
73
- // src/google-vertex-embedding-options.ts
73
+ // src/google-vertex-embedding-model-options.ts
74
74
  import { z as z2 } from "zod/v4";
75
75
  var googleVertexEmbeddingModelOptions = z2.object({
76
76
  /**
@@ -141,10 +141,17 @@ var GoogleVertexEmbeddingModel = class _GoogleVertexEmbeddingModel {
141
141
  providerOptions
142
142
  }) {
143
143
  let googleOptions = await parseProviderOptions({
144
- provider: "vertex",
144
+ provider: "googleVertex",
145
145
  providerOptions,
146
146
  schema: googleVertexEmbeddingModelOptions
147
147
  });
148
+ if (googleOptions == null) {
149
+ googleOptions = await parseProviderOptions({
150
+ provider: "vertex",
151
+ providerOptions,
152
+ schema: googleVertexEmbeddingModelOptions
153
+ });
154
+ }
148
155
  if (googleOptions == null) {
149
156
  googleOptions = await parseProviderOptions({
150
157
  provider: "google",
@@ -234,7 +241,68 @@ import {
234
241
  WORKFLOW_SERIALIZE as WORKFLOW_SERIALIZE2,
235
242
  WORKFLOW_DESERIALIZE as WORKFLOW_DESERIALIZE2
236
243
  } from "@ai-sdk/provider-utils";
244
+ import { z as z5 } from "zod/v4";
245
+
246
+ // src/google-vertex-image-model-options.ts
237
247
  import { z as z4 } from "zod/v4";
248
+ var googleVertexImageModelOptionsSchema = z4.object({
249
+ negativePrompt: z4.string().nullish(),
250
+ personGeneration: z4.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
251
+ safetySetting: z4.enum([
252
+ "block_low_and_above",
253
+ "block_medium_and_above",
254
+ "block_only_high",
255
+ "block_none"
256
+ ]).nullish(),
257
+ addWatermark: z4.boolean().nullish(),
258
+ storageUri: z4.string().nullish(),
259
+ sampleImageSize: z4.enum(["1K", "2K"]).nullish(),
260
+ /**
261
+ * Configuration for image editing operations
262
+ */
263
+ edit: z4.object({
264
+ /**
265
+ * An integer that represents the number of sampling steps.
266
+ * A higher value offers better image quality, a lower value offers better latency.
267
+ * Try 35 steps to start. If the quality doesn't meet your requirements,
268
+ * increase the value towards an upper limit of 75.
269
+ */
270
+ baseSteps: z4.number().nullish(),
271
+ // Edit mode options
272
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects
273
+ mode: z4.enum([
274
+ "EDIT_MODE_INPAINT_INSERTION",
275
+ "EDIT_MODE_INPAINT_REMOVAL",
276
+ "EDIT_MODE_OUTPAINT",
277
+ "EDIT_MODE_CONTROLLED_EDITING",
278
+ "EDIT_MODE_PRODUCT_IMAGE",
279
+ "EDIT_MODE_BGSWAP"
280
+ ]).nullish(),
281
+ /**
282
+ * The mask mode to use.
283
+ * - `MASK_MODE_DEFAULT` - Default value for mask mode.
284
+ * - `MASK_MODE_USER_PROVIDED` - User provided mask. No segmentation needed.
285
+ * - `MASK_MODE_DETECTION_BOX` - Mask from detected bounding boxes.
286
+ * - `MASK_MODE_CLOTHING_AREA` - Masks from segmenting the clothing area with open-vocab segmentation.
287
+ * - `MASK_MODE_PARSED_PERSON` - Masks from segmenting the person body and clothing using the person-parsing model.
288
+ */
289
+ maskMode: z4.enum([
290
+ "MASK_MODE_DEFAULT",
291
+ "MASK_MODE_USER_PROVIDED",
292
+ "MASK_MODE_DETECTION_BOX",
293
+ "MASK_MODE_CLOTHING_AREA",
294
+ "MASK_MODE_PARSED_PERSON"
295
+ ]).nullish(),
296
+ /**
297
+ * Optional. A float value between 0 and 1, inclusive, that represents the
298
+ * percentage of the image width to grow the mask by. Using dilation helps
299
+ * compensate for imprecise masks. We recommend a value of 0.01.
300
+ */
301
+ maskDilation: z4.number().nullish()
302
+ }).nullish()
303
+ });
304
+
305
+ // src/google-vertex-image-model.ts
238
306
  var GoogleVertexImageModel = class _GoogleVertexImageModel {
239
307
  constructor(modelId, config) {
240
308
  this.modelId = modelId;
@@ -277,7 +345,7 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
277
345
  files,
278
346
  mask
279
347
  }) {
280
- var _a, _b, _c, _d, _e, _f, _g;
348
+ var _a, _b, _c, _d, _e, _f;
281
349
  const warnings = [];
282
350
  if (size != null) {
283
351
  warnings.push({
@@ -286,12 +354,16 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
286
354
  details: "This model does not support the `size` option. Use `aspectRatio` instead."
287
355
  });
288
356
  }
289
- const vertexImageOptions = await parseProviderOptions2({
357
+ const googleVertexImageOptions = (_a = await parseProviderOptions2({
358
+ provider: "googleVertex",
359
+ providerOptions,
360
+ schema: googleVertexImageModelOptionsSchema
361
+ })) != null ? _a : await parseProviderOptions2({
290
362
  provider: "vertex",
291
363
  providerOptions,
292
364
  schema: googleVertexImageModelOptionsSchema
293
365
  });
294
- const { edit, ...otherOptions } = vertexImageOptions != null ? vertexImageOptions : {};
366
+ const { edit, ...otherOptions } = googleVertexImageOptions != null ? googleVertexImageOptions : {};
295
367
  const { mode: editMode, baseSteps, maskMode, maskDilation } = edit != null ? edit : {};
296
368
  const isEditMode = files != null && files.length > 0;
297
369
  let body;
@@ -347,7 +419,7 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
347
419
  }
348
420
  };
349
421
  }
350
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
422
+ const currentDate = (_d = (_c = (_b = this.config._internal) == null ? void 0 : _b.currentDate) == null ? void 0 : _c.call(_b)) != null ? _d : /* @__PURE__ */ new Date();
351
423
  const { value: response, responseHeaders } = await postJsonToApi2({
352
424
  url: `${this.config.baseURL}/models/${this.modelId}:predict`,
353
425
  headers: combineHeaders2(
@@ -357,32 +429,34 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
357
429
  body,
358
430
  failedResponseHandler: googleVertexFailedResponseHandler,
359
431
  successfulResponseHandler: createJsonResponseHandler2(
360
- vertexImageResponseSchema
432
+ googleVertexImageResponseSchema
361
433
  ),
362
434
  abortSignal,
363
435
  fetch: this.config.fetch
364
436
  });
365
437
  return {
366
- images: (_e = (_d = response.predictions) == null ? void 0 : _d.map(
438
+ images: (_f = (_e = response.predictions) == null ? void 0 : _e.map(
367
439
  ({ bytesBase64Encoded }) => bytesBase64Encoded
368
- )) != null ? _e : [],
440
+ )) != null ? _f : [],
369
441
  warnings,
370
442
  response: {
371
443
  timestamp: currentDate,
372
444
  modelId: this.modelId,
373
445
  headers: responseHeaders
374
446
  },
375
- providerMetadata: {
376
- vertex: {
377
- images: (_g = (_f = response.predictions) == null ? void 0 : _f.map((prediction) => {
447
+ providerMetadata: (() => {
448
+ var _a2, _b2;
449
+ const payload = {
450
+ images: (_b2 = (_a2 = response.predictions) == null ? void 0 : _a2.map((prediction) => {
378
451
  const {
379
452
  // normalize revised prompt property
380
453
  prompt: revisedPrompt
381
454
  } = prediction;
382
455
  return { ...revisedPrompt != null && { revisedPrompt } };
383
- })) != null ? _g : []
384
- }
385
- }
456
+ })) != null ? _b2 : []
457
+ };
458
+ return { googleVertex: payload, vertex: payload };
459
+ })()
386
460
  };
387
461
  }
388
462
  async doGenerateGemini({
@@ -453,17 +527,20 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
453
527
  "*": [/^https?:\/\/.*$/, /^gs:\/\/.*$/]
454
528
  })
455
529
  });
530
+ const userVertexOptions = (_c = providerOptions == null ? void 0 : providerOptions.googleVertex) != null ? _c : providerOptions == null ? void 0 : providerOptions.vertex;
531
+ const innerVertexOptions = {
532
+ responseModalities: ["IMAGE"],
533
+ imageConfig: aspectRatio ? {
534
+ aspectRatio
535
+ } : void 0,
536
+ ...userVertexOptions != null ? userVertexOptions : {}
537
+ };
456
538
  const result = await languageModel.doGenerate({
457
539
  prompt: languageModelPrompt,
458
540
  seed,
459
541
  providerOptions: {
460
- vertex: {
461
- responseModalities: ["IMAGE"],
462
- imageConfig: aspectRatio ? {
463
- aspectRatio
464
- } : void 0,
465
- ...(_c = providerOptions == null ? void 0 : providerOptions.vertex) != null ? _c : {}
466
- }
542
+ googleVertex: innerVertexOptions,
543
+ vertex: innerVertexOptions
467
544
  },
468
545
  headers,
469
546
  abortSignal
@@ -475,13 +552,15 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
475
552
  images.push(convertToBase64(part.data.data));
476
553
  }
477
554
  }
555
+ const geminiPayload = {
556
+ images: images.map(() => ({}))
557
+ };
478
558
  return {
479
559
  images,
480
560
  warnings,
481
561
  providerMetadata: {
482
- vertex: {
483
- images: images.map(() => ({}))
484
- }
562
+ googleVertex: geminiPayload,
563
+ vertex: geminiPayload
485
564
  },
486
565
  response: {
487
566
  timestamp: currentDate,
@@ -499,71 +578,15 @@ var GoogleVertexImageModel = class _GoogleVertexImageModel {
499
578
  function isGeminiModel(modelId) {
500
579
  return modelId.startsWith("gemini-");
501
580
  }
502
- var vertexImageResponseSchema = z4.object({
503
- predictions: z4.array(
504
- z4.object({
505
- bytesBase64Encoded: z4.string(),
506
- mimeType: z4.string(),
507
- prompt: z4.string().nullish()
581
+ var googleVertexImageResponseSchema = z5.object({
582
+ predictions: z5.array(
583
+ z5.object({
584
+ bytesBase64Encoded: z5.string(),
585
+ mimeType: z5.string(),
586
+ prompt: z5.string().nullish()
508
587
  })
509
588
  ).nullish()
510
589
  });
511
- var googleVertexImageModelOptionsSchema = z4.object({
512
- negativePrompt: z4.string().nullish(),
513
- personGeneration: z4.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
514
- safetySetting: z4.enum([
515
- "block_low_and_above",
516
- "block_medium_and_above",
517
- "block_only_high",
518
- "block_none"
519
- ]).nullish(),
520
- addWatermark: z4.boolean().nullish(),
521
- storageUri: z4.string().nullish(),
522
- sampleImageSize: z4.enum(["1K", "2K"]).nullish(),
523
- /**
524
- * Configuration for image editing operations
525
- */
526
- edit: z4.object({
527
- /**
528
- * An integer that represents the number of sampling steps.
529
- * A higher value offers better image quality, a lower value offers better latency.
530
- * Try 35 steps to start. If the quality doesn't meet your requirements,
531
- * increase the value towards an upper limit of 75.
532
- */
533
- baseSteps: z4.number().nullish(),
534
- // Edit mode options
535
- // https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects
536
- mode: z4.enum([
537
- "EDIT_MODE_INPAINT_INSERTION",
538
- "EDIT_MODE_INPAINT_REMOVAL",
539
- "EDIT_MODE_OUTPAINT",
540
- "EDIT_MODE_CONTROLLED_EDITING",
541
- "EDIT_MODE_PRODUCT_IMAGE",
542
- "EDIT_MODE_BGSWAP"
543
- ]).nullish(),
544
- /**
545
- * The mask mode to use.
546
- * - `MASK_MODE_DEFAULT` - Default value for mask mode.
547
- * - `MASK_MODE_USER_PROVIDED` - User provided mask. No segmentation needed.
548
- * - `MASK_MODE_DETECTION_BOX` - Mask from detected bounding boxes.
549
- * - `MASK_MODE_CLOTHING_AREA` - Masks from segmenting the clothing area with open-vocab segmentation.
550
- * - `MASK_MODE_PARSED_PERSON` - Masks from segmenting the person body and clothing using the person-parsing model.
551
- */
552
- maskMode: z4.enum([
553
- "MASK_MODE_DEFAULT",
554
- "MASK_MODE_USER_PROVIDED",
555
- "MASK_MODE_DETECTION_BOX",
556
- "MASK_MODE_CLOTHING_AREA",
557
- "MASK_MODE_PARSED_PERSON"
558
- ]).nullish(),
559
- /**
560
- * Optional. A float value between 0 and 1, inclusive, that represents the
561
- * percentage of the image width to grow the mask by. Using dilation helps
562
- * compensate for imprecise masks. We recommend a value of 0.01.
563
- */
564
- maskDilation: z4.number().nullish()
565
- }).nullish()
566
- });
567
590
  function getBase64Data(file) {
568
591
  if (file.type === "url") {
569
592
  throw new Error(
@@ -597,13 +620,35 @@ import {
597
620
  convertUint8ArrayToBase64 as convertUint8ArrayToBase642,
598
621
  createJsonResponseHandler as createJsonResponseHandler3,
599
622
  delay,
600
- lazySchema,
601
623
  parseProviderOptions as parseProviderOptions3,
602
624
  postJsonToApi as postJsonToApi3,
603
- resolve as resolve3,
604
- zodSchema
625
+ resolve as resolve3
605
626
  } from "@ai-sdk/provider-utils";
606
- import { z as z5 } from "zod/v4";
627
+ import { z as z7 } from "zod/v4";
628
+
629
+ // src/google-vertex-video-model-options.ts
630
+ import { lazySchema, zodSchema } from "@ai-sdk/provider-utils";
631
+ import { z as z6 } from "zod/v4";
632
+ var googleVertexVideoModelOptionsSchema = lazySchema(
633
+ () => zodSchema(
634
+ z6.object({
635
+ pollIntervalMs: z6.number().positive().nullish(),
636
+ pollTimeoutMs: z6.number().positive().nullish(),
637
+ personGeneration: z6.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
638
+ negativePrompt: z6.string().nullish(),
639
+ generateAudio: z6.boolean().nullish(),
640
+ gcsOutputDirectory: z6.string().nullish(),
641
+ referenceImages: z6.array(
642
+ z6.object({
643
+ bytesBase64Encoded: z6.string().nullish(),
644
+ gcsUri: z6.string().nullish()
645
+ })
646
+ ).nullish()
647
+ }).passthrough()
648
+ )
649
+ );
650
+
651
+ // src/google-vertex-video-model.ts
607
652
  var GoogleVertexVideoModel = class {
608
653
  constructor(modelId, config) {
609
654
  this.modelId = modelId;
@@ -617,10 +662,14 @@ var GoogleVertexVideoModel = class {
617
662
  return 4;
618
663
  }
619
664
  async doGenerate(options) {
620
- var _a, _b, _c, _d, _e, _f;
665
+ var _a, _b, _c, _d, _e, _f, _g;
621
666
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
622
667
  const warnings = [];
623
- const vertexOptions = await parseProviderOptions3({
668
+ const googleVertexOptions = (_d = await parseProviderOptions3({
669
+ provider: "googleVertex",
670
+ providerOptions: options.providerOptions,
671
+ schema: googleVertexVideoModelOptionsSchema
672
+ })) != null ? _d : await parseProviderOptions3({
624
673
  provider: "vertex",
625
674
  providerOptions: options.providerOptions,
626
675
  schema: googleVertexVideoModelOptionsSchema
@@ -645,8 +694,8 @@ var GoogleVertexVideoModel = class {
645
694
  };
646
695
  }
647
696
  }
648
- if ((vertexOptions == null ? void 0 : vertexOptions.referenceImages) != null) {
649
- instance.referenceImages = vertexOptions.referenceImages;
697
+ if ((googleVertexOptions == null ? void 0 : googleVertexOptions.referenceImages) != null) {
698
+ instance.referenceImages = googleVertexOptions.referenceImages;
650
699
  }
651
700
  const parameters = {
652
701
  sampleCount: options.n
@@ -668,8 +717,8 @@ var GoogleVertexVideoModel = class {
668
717
  if (options.seed) {
669
718
  parameters.seed = options.seed;
670
719
  }
671
- if (vertexOptions != null) {
672
- const opts = vertexOptions;
720
+ if (googleVertexOptions != null) {
721
+ const opts = googleVertexOptions;
673
722
  if (opts.personGeneration !== void 0 && opts.personGeneration !== null) {
674
723
  parameters.personGeneration = opts.personGeneration;
675
724
  }
@@ -707,7 +756,7 @@ var GoogleVertexVideoModel = class {
707
756
  parameters
708
757
  },
709
758
  successfulResponseHandler: createJsonResponseHandler3(
710
- vertexOperationSchema
759
+ googleVertexOperationSchema
711
760
  ),
712
761
  failedResponseHandler: googleVertexFailedResponseHandler,
713
762
  abortSignal: options.abortSignal,
@@ -720,8 +769,8 @@ var GoogleVertexVideoModel = class {
720
769
  message: "No operation name returned from API"
721
770
  });
722
771
  }
723
- const pollIntervalMs = (_d = vertexOptions == null ? void 0 : vertexOptions.pollIntervalMs) != null ? _d : 1e4;
724
- const pollTimeoutMs = (_e = vertexOptions == null ? void 0 : vertexOptions.pollTimeoutMs) != null ? _e : 6e5;
772
+ const pollIntervalMs = (_e = googleVertexOptions == null ? void 0 : googleVertexOptions.pollIntervalMs) != null ? _e : 1e4;
773
+ const pollTimeoutMs = (_f = googleVertexOptions == null ? void 0 : googleVertexOptions.pollTimeoutMs) != null ? _f : 6e5;
725
774
  const startTime = Date.now();
726
775
  let finalOperation = operation;
727
776
  let responseHeaders;
@@ -733,7 +782,7 @@ var GoogleVertexVideoModel = class {
733
782
  });
734
783
  }
735
784
  await delay(pollIntervalMs);
736
- if ((_f = options.abortSignal) == null ? void 0 : _f.aborted) {
785
+ if ((_g = options.abortSignal) == null ? void 0 : _g.aborted) {
737
786
  throw new AISDKError({
738
787
  name: "VERTEX_VIDEO_GENERATION_ABORTED",
739
788
  message: "Video generation request was aborted"
@@ -749,7 +798,7 @@ var GoogleVertexVideoModel = class {
749
798
  operationName
750
799
  },
751
800
  successfulResponseHandler: createJsonResponseHandler3(
752
- vertexOperationSchema
801
+ googleVertexOperationSchema
753
802
  ),
754
803
  failedResponseHandler: googleVertexFailedResponseHandler,
755
804
  abortSignal: options.abortSignal,
@@ -809,53 +858,39 @@ var GoogleVertexVideoModel = class {
809
858
  modelId: this.modelId,
810
859
  headers: responseHeaders
811
860
  },
812
- providerMetadata: {
813
- "google-vertex": {
814
- videos: videoMetadata
815
- }
816
- }
861
+ providerMetadata: /* @__PURE__ */ (() => {
862
+ const payload = { videos: videoMetadata };
863
+ return {
864
+ googleVertex: payload,
865
+ // Legacy keys preserved for backward compatibility.
866
+ "google-vertex": payload,
867
+ vertex: payload
868
+ };
869
+ })()
817
870
  };
818
871
  }
819
872
  };
820
- var vertexOperationSchema = z5.object({
821
- name: z5.string().nullish(),
822
- done: z5.boolean().nullish(),
823
- error: z5.object({
824
- code: z5.number().nullish(),
825
- message: z5.string(),
826
- status: z5.string().nullish()
873
+ var googleVertexOperationSchema = z7.object({
874
+ name: z7.string().nullish(),
875
+ done: z7.boolean().nullish(),
876
+ error: z7.object({
877
+ code: z7.number().nullish(),
878
+ message: z7.string(),
879
+ status: z7.string().nullish()
827
880
  }).nullish(),
828
- response: z5.object({
829
- videos: z5.array(
830
- z5.object({
831
- bytesBase64Encoded: z5.string().nullish(),
832
- gcsUri: z5.string().nullish(),
833
- mimeType: z5.string().nullish()
881
+ response: z7.object({
882
+ videos: z7.array(
883
+ z7.object({
884
+ bytesBase64Encoded: z7.string().nullish(),
885
+ gcsUri: z7.string().nullish(),
886
+ mimeType: z7.string().nullish()
834
887
  })
835
888
  ).nullish(),
836
- raiMediaFilteredCount: z5.number().nullish()
889
+ raiMediaFilteredCount: z7.number().nullish()
837
890
  }).nullish()
838
891
  });
839
- var googleVertexVideoModelOptionsSchema = lazySchema(
840
- () => zodSchema(
841
- z5.object({
842
- pollIntervalMs: z5.number().positive().nullish(),
843
- pollTimeoutMs: z5.number().positive().nullish(),
844
- personGeneration: z5.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
845
- negativePrompt: z5.string().nullish(),
846
- generateAudio: z5.boolean().nullish(),
847
- gcsOutputDirectory: z5.string().nullish(),
848
- referenceImages: z5.array(
849
- z5.object({
850
- bytesBase64Encoded: z5.string().nullish(),
851
- gcsUri: z5.string().nullish()
852
- })
853
- ).nullish()
854
- }).passthrough()
855
- )
856
- );
857
892
 
858
- // src/google-vertex-provider.ts
893
+ // src/google-vertex-provider-base.ts
859
894
  var EXPRESS_MODE_BASE_URL = "https://aiplatform.googleapis.com/v1/publishers/google";
860
895
  function createExpressModeFetch(apiKey, customFetch) {
861
896
  return async (url, init) => {
@@ -869,18 +904,18 @@ function createExpressModeFetch(apiKey, customFetch) {
869
904
  return (customFetch != null ? customFetch : fetch)(url.toString(), modifiedInit);
870
905
  };
871
906
  }
872
- function createVertex(options = {}) {
907
+ function createGoogleVertex(options = {}) {
873
908
  const apiKey = loadOptionalSetting({
874
909
  settingValue: options.apiKey,
875
910
  environmentVariableName: "GOOGLE_VERTEX_API_KEY"
876
911
  });
877
- const loadVertexProject = () => loadSetting({
912
+ const loadGoogleVertexProject = () => loadSetting({
878
913
  settingValue: options.project,
879
914
  settingName: "project",
880
915
  environmentVariableName: "GOOGLE_VERTEX_PROJECT",
881
916
  description: "Google Vertex project"
882
917
  });
883
- const loadVertexLocation = () => loadSetting({
918
+ const loadGoogleVertexLocation = () => loadSetting({
884
919
  settingValue: options.location,
885
920
  settingName: "location",
886
921
  environmentVariableName: "GOOGLE_VERTEX_LOCATION",
@@ -891,8 +926,8 @@ function createVertex(options = {}) {
891
926
  if (apiKey) {
892
927
  return (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : EXPRESS_MODE_BASE_URL;
893
928
  }
894
- const region = loadVertexLocation();
895
- const project = loadVertexProject();
929
+ const region = loadGoogleVertexLocation();
930
+ const project = loadGoogleVertexProject();
896
931
  const baseHost = `${region === "global" ? "" : region + "-"}aiplatform.googleapis.com`;
897
932
  return (_b = withoutTrailingSlash(options.baseURL)) != null ? _b : `https://${baseHost}/v1beta1/projects/${project}/locations/${region}/publishers/google`;
898
933
  };
@@ -962,16 +997,16 @@ function createVertex(options = {}) {
962
997
  return provider;
963
998
  }
964
999
 
965
- // src/google-vertex-provider-node.ts
966
- function createVertex2(options = {}) {
1000
+ // src/google-vertex-provider.ts
1001
+ function createGoogleVertex2(options = {}) {
967
1002
  const apiKey = loadOptionalSetting2({
968
1003
  settingValue: options.apiKey,
969
1004
  environmentVariableName: "GOOGLE_VERTEX_API_KEY"
970
1005
  });
971
1006
  if (apiKey) {
972
- return createVertex(options);
1007
+ return createGoogleVertex(options);
973
1008
  }
974
- return createVertex({
1009
+ return createGoogleVertex({
975
1010
  ...options,
976
1011
  headers: async () => ({
977
1012
  Authorization: `Bearer ${await generateAuthToken(
@@ -981,10 +1016,12 @@ function createVertex2(options = {}) {
981
1016
  })
982
1017
  });
983
1018
  }
984
- var vertex = createVertex2();
1019
+ var googleVertex = createGoogleVertex2();
985
1020
  export {
986
1021
  VERSION,
987
- createVertex2 as createVertex,
988
- vertex
1022
+ createGoogleVertex2 as createGoogleVertex,
1023
+ createGoogleVertex2 as createVertex,
1024
+ googleVertex,
1025
+ googleVertex as vertex
989
1026
  };
990
1027
  //# sourceMappingURL=index.js.map