@ai-sdk/google-vertex 2.2.22 → 3.0.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/CHANGELOG.md +258 -62
  2. package/README.md +1 -1
  3. package/anthropic/edge.d.ts +1 -0
  4. package/anthropic/index.d.ts +1 -0
  5. package/{anthropic/edge/dist → dist/anthropic/edge}/index.d.mts +5 -7
  6. package/{anthropic/edge/dist → dist/anthropic/edge}/index.d.ts +5 -7
  7. package/{anthropic/edge/dist → dist/anthropic/edge}/index.js +19 -21
  8. package/dist/anthropic/edge/index.js.map +1 -0
  9. package/{anthropic/edge/dist → dist/anthropic/edge}/index.mjs +19 -21
  10. package/dist/anthropic/edge/index.mjs.map +1 -0
  11. package/{anthropic/dist → dist/anthropic}/index.d.mts +5 -7
  12. package/{anthropic/dist → dist/anthropic}/index.d.ts +5 -7
  13. package/{anthropic/dist → dist/anthropic}/index.js +19 -21
  14. package/dist/anthropic/index.js.map +1 -0
  15. package/{anthropic/dist → dist/anthropic}/index.mjs +19 -21
  16. package/dist/anthropic/index.mjs.map +1 -0
  17. package/{edge/dist → dist/edge}/index.d.mts +8 -17
  18. package/{edge/dist → dist/edge}/index.d.ts +8 -17
  19. package/{edge/dist → dist/edge}/index.js +66 -57
  20. package/dist/edge/index.js.map +1 -0
  21. package/{edge/dist → dist/edge}/index.mjs +69 -59
  22. package/dist/edge/index.mjs.map +1 -0
  23. package/dist/index.d.mts +6 -15
  24. package/dist/index.d.ts +6 -15
  25. package/dist/index.js +66 -57
  26. package/dist/index.js.map +1 -1
  27. package/dist/index.mjs +69 -59
  28. package/dist/index.mjs.map +1 -1
  29. package/edge.d.ts +1 -0
  30. package/package.json +29 -28
  31. package/anthropic/dist/index.js.map +0 -1
  32. package/anthropic/dist/index.mjs.map +0 -1
  33. package/anthropic/edge/dist/index.js.map +0 -1
  34. package/anthropic/edge/dist/index.mjs.map +0 -1
  35. package/edge/dist/index.js.map +0 -1
  36. package/edge/dist/index.mjs.map +0 -1
@@ -2,6 +2,7 @@
  import { resolve as resolve3 } from "@ai-sdk/provider-utils";

  // src/google-vertex-provider.ts
+ import { GoogleGenerativeAILanguageModel } from "@ai-sdk/google/internal";
  import {
  generateId,
  loadSetting,
@@ -16,9 +17,10 @@ import {
  combineHeaders,
  createJsonResponseHandler,
  postJsonToApi,
- resolve
+ resolve,
+ parseProviderOptions
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/google-vertex-error.ts
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
@@ -37,28 +39,40 @@ var googleVertexFailedResponseHandler = createJsonErrorResponseHandler(
  }
  );

+ // src/google-vertex-embedding-options.ts
+ import { z as z2 } from "zod";
+ var googleVertexEmbeddingProviderOptions = z2.object({
+ /**
+ * Optional. Optional reduced dimension for the output embedding.
+ * If set, excessive values in the output embedding are truncated from the end.
+ */
+ outputDimensionality: z2.number().optional()
+ });
+
  // src/google-vertex-embedding-model.ts
  var GoogleVertexEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- return 2048;
- }
- get supportsParallelCalls() {
- return true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
+ const googleOptions = (_a = await parseProviderOptions({
+ provider: "google",
+ providerOptions,
+ schema: googleVertexEmbeddingProviderOptions
+ })) != null ? _a : {};
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -72,13 +86,17 @@ var GoogleVertexEmbeddingModel = class {
  headers
  );
  const url = `${this.config.baseURL}/models/${this.modelId}:predict`;
- const { responseHeaders, value: response } = await postJsonToApi({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi({
  url,
  headers: mergedHeaders,
  body: {
  instances: values.map((value) => ({ content: value })),
  parameters: {
- outputDimensionality: this.settings.outputDimensionality
+ outputDimensionality: googleOptions.outputDimensionality
  }
  },
  failedResponseHandler: googleVertexFailedResponseHandler,
@@ -98,49 +116,43 @@ var GoogleVertexEmbeddingModel = class {
  0
  )
  },
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var googleVertexTextEmbeddingResponseSchema = z2.object({
- predictions: z2.array(
- z2.object({
- embeddings: z2.object({
- values: z2.array(z2.number()),
- statistics: z2.object({
- token_count: z2.number()
+ var googleVertexTextEmbeddingResponseSchema = z3.object({
+ predictions: z3.array(
+ z3.object({
+ embeddings: z3.object({
+ values: z3.array(z3.number()),
+ statistics: z3.object({
+ token_count: z3.number()
  })
  })
  })
  )
  });

- // src/google-vertex-provider.ts
- import { GoogleGenerativeAILanguageModel } from "@ai-sdk/google/internal";
-
  // src/google-vertex-image-model.ts
  import {
  combineHeaders as combineHeaders2,
  createJsonResponseHandler as createJsonResponseHandler2,
- parseProviderOptions,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2,
  resolve as resolve2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z4 } from "zod";
  var GoogleVertexImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#parameter_list
+ this.maxImagesPerCall = 4;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
- }
  async doGenerate({
  prompt,
  n,
@@ -160,7 +172,7 @@ var GoogleVertexImageModel = class {
  details: "This model does not support the `size` option. Use `aspectRatio` instead."
  });
  }
- const vertexImageOptions = parseProviderOptions({
+ const vertexImageOptions = await parseProviderOptions2({
  provider: "vertex",
  providerOptions,
  schema: vertexImageProviderOptionsSchema
@@ -199,27 +211,22 @@ var GoogleVertexImageModel = class {
  };
  }
  };
- var vertexImageResponseSchema = z3.object({
- predictions: z3.array(z3.object({ bytesBase64Encoded: z3.string() })).nullish()
+ var vertexImageResponseSchema = z4.object({
+ predictions: z4.array(z4.object({ bytesBase64Encoded: z4.string() })).nullish()
  });
- var vertexImageProviderOptionsSchema = z3.object({
- negativePrompt: z3.string().nullish(),
- personGeneration: z3.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
- safetySetting: z3.enum([
+ var vertexImageProviderOptionsSchema = z4.object({
+ negativePrompt: z4.string().nullish(),
+ personGeneration: z4.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+ safetySetting: z4.enum([
  "block_low_and_above",
  "block_medium_and_above",
  "block_only_high",
  "block_none"
  ]).nullish(),
- addWatermark: z3.boolean().nullish(),
- storageUri: z3.string().nullish()
+ addWatermark: z4.boolean().nullish(),
+ storageUri: z4.string().nullish()
  });

- // src/google-vertex-supported-file-url.ts
- function isSupportedFileUrl(url) {
- return ["http:", "https:", "gs:"].includes(url.protocol);
- }
-
  // src/google-vertex-provider.ts
  function createVertex(options = {}) {
  const loadVertexProject = () => loadSetting({
@@ -249,27 +256,30 @@ function createVertex(options = {}) {
  baseURL: loadBaseURL()
  };
  };
- const createChatModel = (modelId, settings = {}) => {
+ const createChatModel = (modelId) => {
  var _a;
- return new GoogleGenerativeAILanguageModel(modelId, settings, {
+ return new GoogleGenerativeAILanguageModel(modelId, {
  ...createConfig("chat"),
  generateId: (_a = options.generateId) != null ? _a : generateId,
- isSupportedUrl: isSupportedFileUrl
+ supportedUrls: () => ({
+ "*": [
+ // HTTP URLs:
+ /^https?:\/\/.*$/,
+ // Google Cloud Storage URLs:
+ /^gs:\/\/.*$/
+ ]
+ })
  });
  };
- const createEmbeddingModel = (modelId, settings = {}) => new GoogleVertexEmbeddingModel(
- modelId,
- settings,
- createConfig("embedding")
- );
- const createImageModel = (modelId, settings = {}) => new GoogleVertexImageModel(modelId, settings, createConfig("image"));
- const provider = function(modelId, settings) {
+ const createEmbeddingModel = (modelId) => new GoogleVertexEmbeddingModel(modelId, createConfig("embedding"));
+ const createImageModel = (modelId) => new GoogleVertexImageModel(modelId, createConfig("image"));
+ const provider = function(modelId) {
  if (new.target) {
  throw new Error(
  "The Google Vertex AI model function cannot be called with the new keyword."
  );
  }
- return createChatModel(modelId, settings);
+ return createChatModel(modelId);
  };
  provider.languageModel = createChatModel;
  provider.textEmbeddingModel = createEmbeddingModel;
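
As the provider diff above shows, per-model settings objects are removed in 3.0.0-alpha: `outputDimensionality` is no longer passed when creating the embedding model, but is read at call time from `providerOptions` under the `google` key (parsed by `googleVertexEmbeddingProviderOptions`). A minimal before/after sketch, assuming an AI SDK 5-style `embed()` call; the `embed` import from `ai`, the model id, and the dimension value are illustrative, not part of this diff:

  import { embed } from 'ai';
  import { vertex } from '@ai-sdk/google-vertex';

  // 2.2.x: the dimension was a model setting
  // const model = vertex.textEmbeddingModel('text-embedding-004', { outputDimensionality: 512 });

  // 3.0.0-alpha: the model factory takes only an id; options move to providerOptions
  const { embedding } = await embed({
    model: vertex.textEmbeddingModel('text-embedding-004'),
    value: 'sunny day at the beach',
    providerOptions: {
      google: { outputDimensionality: 512 }, // validated against googleVertexEmbeddingProviderOptions
    },
  });
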
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/edge/google-vertex-provider-edge.ts","../../src/google-vertex-provider.ts","../../src/google-vertex-embedding-model.ts","../../src/google-vertex-error.ts","../../src/google-vertex-embedding-options.ts","../../src/google-vertex-image-model.ts","../../src/edge/google-vertex-auth-edge.ts"],"sourcesContent":["import { resolve } from '@ai-sdk/provider-utils';\nimport {\n createVertex as createVertexOriginal,\n GoogleVertexProvider,\n GoogleVertexProviderSettings as GoogleVertexProviderSettingsOriginal,\n} from '../google-vertex-provider';\nimport {\n generateAuthToken,\n GoogleCredentials,\n} from './google-vertex-auth-edge';\n\nexport type { GoogleVertexProvider };\n\nexport interface GoogleVertexProviderSettings\n extends GoogleVertexProviderSettingsOriginal {\n /**\n * Optional. The Google credentials for the Google Cloud service account. If\n * not provided, the Google Vertex provider will use environment variables to\n * load the credentials.\n */\n googleCredentials?: GoogleCredentials;\n}\n\nexport function createVertex(\n options: GoogleVertexProviderSettings = {},\n): GoogleVertexProvider {\n return createVertexOriginal({\n ...options,\n headers: async () => ({\n Authorization: `Bearer ${await generateAuthToken(\n options.googleCredentials,\n )}`,\n ...(await resolve(options.headers)),\n }),\n });\n}\n\n/**\nDefault Google Vertex AI provider instance.\n */\nexport const vertex = createVertex();\n","import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal';\nimport { ImageModelV2, LanguageModelV2, ProviderV2 } from '@ai-sdk/provider';\nimport {\n FetchFunction,\n generateId,\n loadSetting,\n Resolvable,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { GoogleVertexConfig } from './google-vertex-config';\nimport { GoogleVertexEmbeddingModel } from './google-vertex-embedding-model';\nimport { GoogleVertexEmbeddingModelId } from './google-vertex-embedding-options';\nimport { GoogleVertexImageModel } from './google-vertex-image-model';\nimport { GoogleVertexImageModelId } from './google-vertex-image-settings';\nimport { GoogleVertexModelId } from './google-vertex-options';\n\nexport interface GoogleVertexProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n */\n (modelId: GoogleVertexModelId): LanguageModelV2;\n\n languageModel: (modelId: GoogleVertexModelId) => LanguageModelV2;\n\n /**\n * Creates a model for image generation.\n */\n image(modelId: GoogleVertexImageModelId): ImageModelV2;\n\n /**\nCreates a model for image generation.\n */\n imageModel(modelId: GoogleVertexImageModelId): ImageModelV2;\n}\n\nexport interface GoogleVertexProviderSettings {\n /**\nYour Google Vertex location. Defaults to the environment variable `GOOGLE_VERTEX_LOCATION`.\n */\n location?: string;\n\n /**\nYour Google Vertex project. Defaults to the environment variable `GOOGLE_VERTEX_PROJECT`.\n */\n project?: string;\n\n /**\n * Headers to use for requests. Can be:\n * - A headers object\n * - A Promise that resolves to a headers object\n * - A function that returns a headers object\n * - A function that returns a Promise of a headers object\n */\n headers?: Resolvable<Record<string, string | undefined>>;\n\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. 
testing.\n */\n fetch?: FetchFunction;\n\n // for testing\n generateId?: () => string;\n\n /**\nBase URL for the Google Vertex API calls.\n */\n baseURL?: string;\n}\n\n/**\nCreate a Google Vertex AI provider instance.\n */\nexport function createVertex(\n options: GoogleVertexProviderSettings = {},\n): GoogleVertexProvider {\n const loadVertexProject = () =>\n loadSetting({\n settingValue: options.project,\n settingName: 'project',\n environmentVariableName: 'GOOGLE_VERTEX_PROJECT',\n description: 'Google Vertex project',\n });\n\n const loadVertexLocation = () =>\n loadSetting({\n settingValue: options.location,\n settingName: 'location',\n environmentVariableName: 'GOOGLE_VERTEX_LOCATION',\n description: 'Google Vertex location',\n });\n\n const loadBaseURL = () => {\n const region = loadVertexLocation();\n const project = loadVertexProject();\n return (\n withoutTrailingSlash(options.baseURL) ??\n `https://${region}-aiplatform.googleapis.com/v1/projects/${project}/locations/${region}/publishers/google`\n );\n };\n\n const createConfig = (name: string): GoogleVertexConfig => {\n return {\n provider: `google.vertex.${name}`,\n headers: options.headers ?? {},\n fetch: options.fetch,\n baseURL: loadBaseURL(),\n };\n };\n\n const createChatModel = (modelId: GoogleVertexModelId) => {\n return new GoogleGenerativeAILanguageModel(modelId, {\n ...createConfig('chat'),\n generateId: options.generateId ?? generateId,\n supportedUrls: () => ({\n '*': [\n // HTTP URLs:\n /^https?:\\/\\/.*$/,\n // Google Cloud Storage URLs:\n /^gs:\\/\\/.*$/,\n ],\n }),\n });\n };\n\n const createEmbeddingModel = (modelId: GoogleVertexEmbeddingModelId) =>\n new GoogleVertexEmbeddingModel(modelId, createConfig('embedding'));\n\n const createImageModel = (modelId: GoogleVertexImageModelId) =>\n new GoogleVertexImageModel(modelId, createConfig('image'));\n\n const provider = function (modelId: GoogleVertexModelId) {\n if (new.target) {\n throw new Error(\n 'The Google Vertex AI model function cannot be called with the new keyword.',\n );\n }\n\n return createChatModel(modelId);\n };\n\n provider.languageModel = createChatModel;\n provider.textEmbeddingModel = createEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n","import {\n EmbeddingModelV2,\n TooManyEmbeddingValuesForCallError,\n} from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n postJsonToApi,\n resolve,\n Resolvable,\n parseProviderOptions,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\nimport { googleVertexFailedResponseHandler } from './google-vertex-error';\nimport {\n GoogleVertexEmbeddingModelId,\n googleVertexEmbeddingProviderOptions,\n} from './google-vertex-embedding-options';\nimport { GoogleVertexConfig } from './google-vertex-config';\n\nexport class GoogleVertexEmbeddingModel implements EmbeddingModelV2<string> {\n readonly specificationVersion = 'v2';\n readonly modelId: GoogleVertexEmbeddingModelId;\n readonly maxEmbeddingsPerCall = 2048;\n readonly supportsParallelCalls = true;\n\n private readonly config: GoogleVertexConfig;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n modelId: GoogleVertexEmbeddingModelId,\n config: GoogleVertexConfig,\n ) {\n this.modelId = modelId;\n this.config = config;\n }\n\n async doEmbed({\n values,\n headers,\n abortSignal,\n providerOptions,\n }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<\n 
Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>\n > {\n // Parse provider options\n const googleOptions =\n (await parseProviderOptions({\n provider: 'google',\n providerOptions,\n schema: googleVertexEmbeddingProviderOptions,\n })) ?? {};\n\n if (values.length > this.maxEmbeddingsPerCall) {\n throw new TooManyEmbeddingValuesForCallError({\n provider: this.provider,\n modelId: this.modelId,\n maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,\n values,\n });\n }\n\n const mergedHeaders = combineHeaders(\n await resolve(this.config.headers),\n headers,\n );\n\n const url = `${this.config.baseURL}/models/${this.modelId}:predict`;\n const {\n responseHeaders,\n value: response,\n rawValue,\n } = await postJsonToApi({\n url,\n headers: mergedHeaders,\n body: {\n instances: values.map(value => ({ content: value })),\n parameters: {\n outputDimensionality: googleOptions.outputDimensionality,\n },\n },\n failedResponseHandler: googleVertexFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(\n googleVertexTextEmbeddingResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n embeddings: response.predictions.map(\n prediction => prediction.embeddings.values,\n ),\n usage: {\n tokens: response.predictions.reduce(\n (tokenCount, prediction) =>\n tokenCount + prediction.embeddings.statistics.token_count,\n 0,\n ),\n },\n response: { headers: responseHeaders, body: rawValue },\n };\n }\n}\n\n// minimal version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst googleVertexTextEmbeddingResponseSchema = z.object({\n predictions: z.array(\n z.object({\n embeddings: z.object({\n values: z.array(z.number()),\n statistics: z.object({\n token_count: z.number(),\n }),\n }),\n }),\n ),\n});\n","import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\n\nconst googleVertexErrorDataSchema = z.object({\n error: z.object({\n code: z.number().nullable(),\n message: z.string(),\n status: z.string(),\n }),\n});\n\nexport type GoogleVertexErrorData = z.infer<typeof googleVertexErrorDataSchema>;\n\nexport const googleVertexFailedResponseHandler = createJsonErrorResponseHandler(\n {\n errorSchema: googleVertexErrorDataSchema,\n errorToMessage: data => data.error.message,\n },\n);\n","import { z } from 'zod';\n\n// https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api\nexport type GoogleVertexEmbeddingModelId =\n | 'textembedding-gecko'\n | 'textembedding-gecko@001'\n | 'textembedding-gecko@003'\n | 'textembedding-gecko-multilingual'\n | 'textembedding-gecko-multilingual@001'\n | 'text-multilingual-embedding-002'\n | 'text-embedding-004'\n | 'text-embedding-005'\n | (string & {});\n\nexport const googleVertexEmbeddingProviderOptions = z.object({\n /**\n * Optional. 
Optional reduced dimension for the output embedding.\n * If set, excessive values in the output embedding are truncated from the end.\n */\n outputDimensionality: z.number().optional(),\n});\n\nexport type GoogleVertexEmbeddingProviderOptions = z.infer<\n typeof googleVertexEmbeddingProviderOptions\n>;\n","import { ImageModelV2, ImageModelV2CallWarning } from '@ai-sdk/provider';\nimport {\n Resolvable,\n combineHeaders,\n createJsonResponseHandler,\n parseProviderOptions,\n postJsonToApi,\n resolve,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\nimport { googleVertexFailedResponseHandler } from './google-vertex-error';\nimport { GoogleVertexImageModelId } from './google-vertex-image-settings';\n\ninterface GoogleVertexImageModelConfig {\n provider: string;\n baseURL: string;\n headers?: Resolvable<Record<string, string | undefined>>;\n fetch?: typeof fetch;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\n// https://cloud.google.com/vertex-ai/generative-ai/docs/image/generate-images\nexport class GoogleVertexImageModel implements ImageModelV2 {\n readonly specificationVersion = 'v2';\n // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#parameter_list\n readonly maxImagesPerCall = 4;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: GoogleVertexImageModelId,\n private config: GoogleVertexImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n aspectRatio,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV2['doGenerate']>>\n > {\n const warnings: Array<ImageModelV2CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'size',\n details:\n 'This model does not support the `size` option. Use `aspectRatio` instead.',\n });\n }\n\n const vertexImageOptions = await parseProviderOptions({\n provider: 'vertex',\n providerOptions,\n schema: vertexImageProviderOptionsSchema,\n });\n\n const body = {\n instances: [{ prompt }],\n parameters: {\n sampleCount: n,\n ...(aspectRatio != null ? { aspectRatio } : {}),\n ...(seed != null ? { seed } : {}),\n ...(vertexImageOptions ?? {}),\n },\n };\n\n const currentDate = this.config._internal?.currentDate?.() ?? new Date();\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/models/${this.modelId}:predict`,\n headers: combineHeaders(await resolve(this.config.headers), headers),\n body,\n failedResponseHandler: googleVertexFailedResponseHandler,\n successfulResponseHandler: createJsonResponseHandler(\n vertexImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images:\n response.predictions?.map(\n (p: { bytesBase64Encoded: string }) => p.bytesBase64Encoded,\n ) ?? 
[],\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// minimal version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst vertexImageResponseSchema = z.object({\n predictions: z.array(z.object({ bytesBase64Encoded: z.string() })).nullish(),\n});\n\nconst vertexImageProviderOptionsSchema = z.object({\n negativePrompt: z.string().nullish(),\n personGeneration: z\n .enum(['dont_allow', 'allow_adult', 'allow_all'])\n .nullish(),\n safetySetting: z\n .enum([\n 'block_low_and_above',\n 'block_medium_and_above',\n 'block_only_high',\n 'block_none',\n ])\n .nullish(),\n addWatermark: z.boolean().nullish(),\n storageUri: z.string().nullish(),\n});\nexport type GoogleVertexImageProviderOptions = z.infer<\n typeof vertexImageProviderOptionsSchema\n>;\n","import { loadOptionalSetting, loadSetting } from '@ai-sdk/provider-utils';\n\nexport interface GoogleCredentials {\n /**\n * The client email for the Google Cloud service account. Defaults to the\n * value of the `GOOGLE_CLIENT_EMAIL` environment variable.\n */\n clientEmail: string;\n\n /**\n * The private key for the Google Cloud service account. Defaults to the\n * value of the `GOOGLE_PRIVATE_KEY` environment variable.\n */\n privateKey: string;\n\n /**\n * Optional. The private key ID for the Google Cloud service account. Defaults\n * to the value of the `GOOGLE_PRIVATE_KEY_ID` environment variable.\n */\n privateKeyId?: string;\n}\n\nconst loadCredentials = async (): Promise<GoogleCredentials> => {\n try {\n return {\n clientEmail: loadSetting({\n settingValue: undefined,\n settingName: 'clientEmail',\n environmentVariableName: 'GOOGLE_CLIENT_EMAIL',\n description: 'Google client email',\n }),\n privateKey: loadSetting({\n settingValue: undefined,\n settingName: 'privateKey',\n environmentVariableName: 'GOOGLE_PRIVATE_KEY',\n description: 'Google private key',\n }),\n privateKeyId: loadOptionalSetting({\n settingValue: undefined,\n environmentVariableName: 'GOOGLE_PRIVATE_KEY_ID',\n }),\n };\n } catch (error: any) {\n throw new Error(`Failed to load Google credentials: ${error.message}`);\n }\n};\n\n// Convert a string to base64url\nconst base64url = (str: string) => {\n return btoa(str).replace(/\\+/g, '-').replace(/\\//g, '_').replace(/=/g, '');\n};\nconst importPrivateKey = async (pemKey: string) => {\n const pemHeader = '-----BEGIN PRIVATE KEY-----';\n const pemFooter = '-----END PRIVATE KEY-----';\n\n // Remove header, footer, and any whitespace/newlines\n const pemContents = pemKey\n .replace(pemHeader, '')\n .replace(pemFooter, '')\n .replace(/\\s/g, '');\n\n // Decode base64 to binary\n const binaryString = atob(pemContents);\n\n // Convert binary string to Uint8Array\n const binaryData = new Uint8Array(binaryString.length);\n for (let i = 0; i < binaryString.length; i++) {\n binaryData[i] = binaryString.charCodeAt(i);\n }\n\n return await crypto.subtle.importKey(\n 'pkcs8',\n binaryData,\n { name: 'RSASSA-PKCS1-v1_5', hash: 'SHA-256' },\n true,\n ['sign'],\n );\n};\n\nconst buildJwt = async (credentials: GoogleCredentials) => {\n const now = Math.floor(Date.now() / 1000);\n\n // Only include kid in header if privateKeyId is provided\n const header: { alg: string; typ: string; kid?: string } = {\n alg: 'RS256',\n typ: 'JWT',\n };\n\n if (credentials.privateKeyId) {\n header.kid = credentials.privateKeyId;\n }\n\n const payload = {\n iss: 
credentials.clientEmail,\n scope: 'https://www.googleapis.com/auth/cloud-platform',\n aud: 'https://oauth2.googleapis.com/token',\n exp: now + 3600,\n iat: now,\n };\n\n const privateKey = await importPrivateKey(credentials.privateKey);\n\n const signingInput = `${base64url(JSON.stringify(header))}.${base64url(\n JSON.stringify(payload),\n )}`;\n const encoder = new TextEncoder();\n const data = encoder.encode(signingInput);\n\n const signature = await crypto.subtle.sign(\n 'RSASSA-PKCS1-v1_5',\n privateKey,\n data,\n );\n\n const signatureBase64 = base64url(\n String.fromCharCode(...new Uint8Array(signature)),\n );\n\n return `${base64url(JSON.stringify(header))}.${base64url(\n JSON.stringify(payload),\n )}.${signatureBase64}`;\n};\n\n/**\n * Generate an authentication token for Google Vertex AI in a manner compatible\n * with the Edge runtime.\n */\nexport async function generateAuthToken(credentials?: GoogleCredentials) {\n try {\n const creds = credentials || (await loadCredentials());\n const jwt = await buildJwt(creds);\n\n const response = await fetch('https://oauth2.googleapis.com/token', {\n method: 'POST',\n headers: { 'Content-Type': 'application/x-www-form-urlencoded' },\n body: new URLSearchParams({\n grant_type: 'urn:ietf:params:oauth:grant-type:jwt-bearer',\n assertion: jwt,\n }),\n });\n\n if (!response.ok) {\n throw new Error(`Token request failed: ${response.statusText}`);\n }\n\n const data = await response.json();\n return data.access_token;\n } catch (error) {\n throw error;\n }\n}\n"],"mappings":";AAAA,SAAS,WAAAA,gBAAe;;;ACAxB,SAAS,uCAAuC;AAEhD;AAAA,EAEE;AAAA,EACA;AAAA,EAEA;AAAA,OACK;;;ACRP;AAAA,EAEE;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AACP,SAAS,KAAAC,UAAS;;;ACZlB,SAAS,sCAAsC;AAC/C,SAAS,SAAS;AAElB,IAAM,8BAA8B,EAAE,OAAO;AAAA,EAC3C,OAAO,EAAE,OAAO;AAAA,IACd,MAAM,EAAE,OAAO,EAAE,SAAS;AAAA,IAC1B,SAAS,EAAE,OAAO;AAAA,IAClB,QAAQ,EAAE,OAAO;AAAA,EACnB,CAAC;AACH,CAAC;AAIM,IAAM,oCAAoC;AAAA,EAC/C;AAAA,IACE,aAAa;AAAA,IACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,EACrC;AACF;;;AClBA,SAAS,KAAAC,UAAS;AAcX,IAAM,uCAAuCA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA,EAK3D,sBAAsBA,GAAE,OAAO,EAAE,SAAS;AAC5C,CAAC;;;AFAM,IAAM,6BAAN,MAAqE;AAAA,EAY1E,YACE,SACA,QACA;AAdF,SAAS,uBAAuB;AAEhC,SAAS,uBAAuB;AAChC,SAAS,wBAAwB;AAY/B,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,EAChB;AAAA,EAVA,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAUA,MAAM,QAAQ;AAAA,IACZ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AA/CJ;AAiDI,UAAM,iBACH,WAAM,qBAAqB;AAAA,MAC1B,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC,MAJA,YAIM,CAAC;AAEV,QAAI,OAAO,SAAS,KAAK,sBAAsB;AAC7C,YAAM,IAAI,mCAAmC;AAAA,QAC3C,UAAU,KAAK;AAAA,QACf,SAAS,KAAK;AAAA,QACd,sBAAsB,KAAK;AAAA,QAC3B;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,gBAAgB;AAAA,MACpB,MAAM,QAAQ,KAAK,OAAO,OAAO;AAAA,MACjC;AAAA,IACF;AAEA,UAAM,MAAM,GAAG,KAAK,OAAO,OAAO,WAAW,KAAK,OAAO;AACzD,UAAM;AAAA,MACJ;AAAA,MACA,OAAO;AAAA,MACP;AAAA,IACF,IAAI,MAAM,cAAc;AAAA,MACtB;AAAA,MACA,SAAS;AAAA,MACT,MAAM;AAAA,QACJ,WAAW,OAAO,IAAI,YAAU,EAAE,SAAS,MAAM,EAAE;AAAA,QACnD,YAAY;AAAA,UACV,sBAAsB,cAAc;AAAA,QACtC;AAAA,MACF;AAAA,MACA,uBAAuB;AAAA,MACvB,2BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,YAAY,SAAS,YAAY;AAAA,QAC/B,gBAAc,WAAW,WAAW;AAAA,MACtC;AAAA,MACA,OAAO;AAAA,QACL,QAAQ,SAAS,YAAY;AAAA,UAC3B,CAAC,YAAY,eACX,aAAa,WAAW,WAAW,WAAW;AAAA,UAChD;AAAA,QACF;AAAA,MACF;AAAA,MACA,UAAU,EAAE,SAAS,iBAAiB,MAAM,SAAS;AAAA,IACvD;AAAA,EACF;AACF;AAIA,IAAM,0CAA0CC,GAAE,OAAO;AAAA,EACvD,aAAaA,GAAE;AAAA,IACbA,GAAE,OAAO;AAAA,MACP,YAAYA,GAAE,OAAO;AAAA,QACnB,QAAQA,GAAE,MAAMA,GAAE,OAAO,CAAC;AAAA,QAC1B
,YAAYA,GAAE,OAAO;AAAA,UACnB,aAAaA,GAAE,OAAO;AAAA,QACxB,CAAC;AAAA,MACH,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AACF,CAAC;;;AGxHD;AAAA,EAEE,kBAAAC;AAAA,EACA,6BAAAC;AAAA,EACA,wBAAAC;AAAA,EACA,iBAAAC;AAAA,EACA,WAAAC;AAAA,OACK;AACP,SAAS,KAAAC,UAAS;AAeX,IAAM,yBAAN,MAAqD;AAAA,EAS1D,YACW,SACD,QACR;AAFS;AACD;AAVV,SAAS,uBAAuB;AAEhC;AAAA,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAjDJ;AAkDI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,qBAAqB,MAAMC,sBAAqB;AAAA,MACpD,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC;AAED,UAAM,OAAO;AAAA,MACX,WAAW,CAAC,EAAE,OAAO,CAAC;AAAA,MACtB,YAAY;AAAA,QACV,aAAa;AAAA,QACb,GAAI,eAAe,OAAO,EAAE,YAAY,IAAI,CAAC;AAAA,QAC7C,GAAI,QAAQ,OAAO,EAAE,KAAK,IAAI,CAAC;AAAA,QAC/B,GAAI,kDAAsB,CAAC;AAAA,MAC7B;AAAA,IACF;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,MAAMC,eAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO,WAAW,KAAK,OAAO;AAAA,MAClD,SAASC,gBAAe,MAAMC,SAAQ,KAAK,OAAO,OAAO,GAAG,OAAO;AAAA,MACnE;AAAA,MACA,uBAAuB;AAAA,MACvB,2BAA2BC;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,SACE,oBAAS,gBAAT,mBAAsB;AAAA,QACpB,CAAC,MAAsC,EAAE;AAAA,YAD3C,YAEK,CAAC;AAAA,MACR;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,4BAA4BC,GAAE,OAAO;AAAA,EACzC,aAAaA,GAAE,MAAMA,GAAE,OAAO,EAAE,oBAAoBA,GAAE,OAAO,EAAE,CAAC,CAAC,EAAE,QAAQ;AAC7E,CAAC;AAED,IAAM,mCAAmCA,GAAE,OAAO;AAAA,EAChD,gBAAgBA,GAAE,OAAO,EAAE,QAAQ;AAAA,EACnC,kBAAkBA,GACf,KAAK,CAAC,cAAc,eAAe,WAAW,CAAC,EAC/C,QAAQ;AAAA,EACX,eAAeA,GACZ,KAAK;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC,EACA,QAAQ;AAAA,EACX,cAAcA,GAAE,QAAQ,EAAE,QAAQ;AAAA,EAClC,YAAYA,GAAE,OAAO,EAAE,QAAQ;AACjC,CAAC;;;AJrDM,SAAS,aACd,UAAwC,CAAC,GACnB;AACtB,QAAM,oBAAoB,MACxB,YAAY;AAAA,IACV,cAAc,QAAQ;AAAA,IACtB,aAAa;AAAA,IACb,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf,CAAC;AAEH,QAAM,qBAAqB,MACzB,YAAY;AAAA,IACV,cAAc,QAAQ;AAAA,IACtB,aAAa;AAAA,IACb,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf,CAAC;AAEH,QAAM,cAAc,MAAM;AA5F5B;AA6FI,UAAM,SAAS,mBAAmB;AAClC,UAAM,UAAU,kBAAkB;AAClC,YACE,0BAAqB,QAAQ,OAAO,MAApC,YACA,WAAW,MAAM,0CAA0C,OAAO,cAAc,MAAM;AAAA,EAE1F;AAEA,QAAM,eAAe,CAAC,SAAqC;AArG7D;AAsGI,WAAO;AAAA,MACL,UAAU,iBAAiB,IAAI;AAAA,MAC/B,UAAS,aAAQ,YAAR,YAAmB,CAAC;AAAA,MAC7B,OAAO,QAAQ;AAAA,MACf,SAAS,YAAY;AAAA,IACvB;AAAA,EACF;AAEA,QAAM,kBAAkB,CAAC,YAAiC;AA9G5D;AA+GI,WAAO,IAAI,gCAAgC,SAAS;AAAA,MAClD,GAAG,aAAa,MAAM;AAAA,MACtB,aAAY,aAAQ,eAAR,YAAsB;AAAA,MAClC,eAAe,OAAO;AAAA,QACpB,KAAK;AAAA;AAAA,UAEH;AAAA;AAAA,UAEA;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAEA,QAAM,uBAAuB,CAAC,YAC5B,IAAI,2BAA2B,SAAS,aAAa,WAAW,CAAC;AAEnE,QAAM,mBAAmB,CAAC,YACxB,IAAI,uBAAuB,SAAS,aAAa,OAAO,CAAC;AAE3D,QAAM,WAAW,SAAU,SAA8B;AACvD,QAAI,YAAY;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,gBAAgB,OAAO;AAAA,EAChC;AAEA,WAAS,gBAAgB;AACzB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;;;AKnJA,SAAS,qBAAqB,eAAAC,oBAAmB;AAsBjD,IAAM,kBAAkB,YAAwC;AAC9D,MAAI;AACF,WAAO;AAAA,MACL,aAAaA,aAAY;AAAA,QACvB,cAAc;AAAA,QACd,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC;AAAA,MACD,YAAYA,aAAY;AAAA,QACtB,cAAc;AAAA,QACd,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC;AAAA,MACD,cAAc,oBAAoB;AAAA,QAChC,cAAc;AAAA,QACd,yBAAyB;AAAA,MAC3B,CAAC;AAAA,IACH;AAAA,EACF,SAAS,OAAY;AACnB,UAAM,IAAI,MAAM,sCAAsC,MAAM,OAAO,EAAE;AAAA,EACvE;AACF;AAGA,IAAM,YAAY,CAAC,QAAgB;AACjC,SAAO,KAAK,GAAG
,EAAE,QAAQ,OAAO,GAAG,EAAE,QAAQ,OAAO,GAAG,EAAE,QAAQ,MAAM,EAAE;AAC3E;AACA,IAAM,mBAAmB,OAAO,WAAmB;AACjD,QAAM,YAAY;AAClB,QAAM,YAAY;AAGlB,QAAM,cAAc,OACjB,QAAQ,WAAW,EAAE,EACrB,QAAQ,WAAW,EAAE,EACrB,QAAQ,OAAO,EAAE;AAGpB,QAAM,eAAe,KAAK,WAAW;AAGrC,QAAM,aAAa,IAAI,WAAW,aAAa,MAAM;AACrD,WAAS,IAAI,GAAG,IAAI,aAAa,QAAQ,KAAK;AAC5C,eAAW,CAAC,IAAI,aAAa,WAAW,CAAC;AAAA,EAC3C;AAEA,SAAO,MAAM,OAAO,OAAO;AAAA,IACzB;AAAA,IACA;AAAA,IACA,EAAE,MAAM,qBAAqB,MAAM,UAAU;AAAA,IAC7C;AAAA,IACA,CAAC,MAAM;AAAA,EACT;AACF;AAEA,IAAM,WAAW,OAAO,gBAAmC;AACzD,QAAM,MAAM,KAAK,MAAM,KAAK,IAAI,IAAI,GAAI;AAGxC,QAAM,SAAqD;AAAA,IACzD,KAAK;AAAA,IACL,KAAK;AAAA,EACP;AAEA,MAAI,YAAY,cAAc;AAC5B,WAAO,MAAM,YAAY;AAAA,EAC3B;AAEA,QAAM,UAAU;AAAA,IACd,KAAK,YAAY;AAAA,IACjB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,KAAK,MAAM;AAAA,IACX,KAAK;AAAA,EACP;AAEA,QAAM,aAAa,MAAM,iBAAiB,YAAY,UAAU;AAEhE,QAAM,eAAe,GAAG,UAAU,KAAK,UAAU,MAAM,CAAC,CAAC,IAAI;AAAA,IAC3D,KAAK,UAAU,OAAO;AAAA,EACxB,CAAC;AACD,QAAM,UAAU,IAAI,YAAY;AAChC,QAAM,OAAO,QAAQ,OAAO,YAAY;AAExC,QAAM,YAAY,MAAM,OAAO,OAAO;AAAA,IACpC;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,QAAM,kBAAkB;AAAA,IACtB,OAAO,aAAa,GAAG,IAAI,WAAW,SAAS,CAAC;AAAA,EAClD;AAEA,SAAO,GAAG,UAAU,KAAK,UAAU,MAAM,CAAC,CAAC,IAAI;AAAA,IAC7C,KAAK,UAAU,OAAO;AAAA,EACxB,CAAC,IAAI,eAAe;AACtB;AAMA,eAAsB,kBAAkB,aAAiC;AACvE,MAAI;AACF,UAAM,QAAQ,eAAgB,MAAM,gBAAgB;AACpD,UAAM,MAAM,MAAM,SAAS,KAAK;AAEhC,UAAM,WAAW,MAAM,MAAM,uCAAuC;AAAA,MAClE,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,oCAAoC;AAAA,MAC/D,MAAM,IAAI,gBAAgB;AAAA,QACxB,YAAY;AAAA,QACZ,WAAW;AAAA,MACb,CAAC;AAAA,IACH,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,IAAI,MAAM,yBAAyB,SAAS,UAAU,EAAE;AAAA,IAChE;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,WAAO,KAAK;AAAA,EACd,SAAS,OAAO;AACd,UAAM;AAAA,EACR;AACF;;;AN/HO,SAASC,cACd,UAAwC,CAAC,GACnB;AACtB,SAAO,aAAqB;AAAA,IAC1B,GAAG;AAAA,IACH,SAAS,aAAa;AAAA,MACpB,eAAe,UAAU,MAAM;AAAA,QAC7B,QAAQ;AAAA,MACV,CAAC;AAAA,MACD,GAAI,MAAMC,SAAQ,QAAQ,OAAO;AAAA,IACnC;AAAA,EACF,CAAC;AACH;AAKO,IAAM,SAASD,cAAa;","names":["resolve","z","z","z","combineHeaders","createJsonResponseHandler","parseProviderOptions","postJsonToApi","resolve","z","parseProviderOptions","postJsonToApi","combineHeaders","resolve","createJsonResponseHandler","z","loadSetting","createVertex","resolve"]}
package/dist/index.d.mts CHANGED
@@ -1,16 +1,9 @@
  import { z } from 'zod';
  import { GoogleAuthOptions } from 'google-auth-library';
- import { ProviderV1, LanguageModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
- import { InternalGoogleGenerativeAISettings } from '@ai-sdk/google/internal';

  type GoogleVertexImageModelId = 'imagen-3.0-generate-001' | 'imagen-3.0-generate-002' | 'imagen-3.0-fast-generate-001' | (string & {});
- interface GoogleVertexImageSettings {
- /**
- Override the maximum number of images per call (default 4)
- */
- maxImagesPerCall?: number;
- }

  declare const vertexImageProviderOptionsSchema: z.ZodObject<{
  negativePrompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
@@ -34,23 +27,21 @@ declare const vertexImageProviderOptionsSchema: z.ZodObject<{
  type GoogleVertexImageProviderOptions = z.infer<typeof vertexImageProviderOptionsSchema>;

  type GoogleVertexModelId = 'gemini-2.0-flash-001' | 'gemini-1.5-flash' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.0-pro-001' | 'gemini-1.0-pro-vision-001' | 'gemini-1.0-pro' | 'gemini-1.0-pro-001' | 'gemini-1.0-pro-002' | 'gemini-2.0-flash-lite-preview-02-05' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-exp' | (string & {});
- interface GoogleVertexSettings extends InternalGoogleGenerativeAISettings {
- }

- interface GoogleVertexProvider extends ProviderV1 {
+ interface GoogleVertexProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings): LanguageModelV1;
- languageModel: (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings) => LanguageModelV1;
+ (modelId: GoogleVertexModelId): LanguageModelV2;
+ languageModel: (modelId: GoogleVertexModelId) => LanguageModelV2;
  /**
  * Creates a model for image generation.
  */
- image(modelId: GoogleVertexImageModelId, settings?: GoogleVertexImageSettings): ImageModelV1;
+ image(modelId: GoogleVertexImageModelId): ImageModelV2;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: GoogleVertexImageModelId, settings?: GoogleVertexImageSettings): ImageModelV1;
+ imageModel(modelId: GoogleVertexImageModelId): ImageModelV2;
  }
  interface GoogleVertexProviderSettings$1 {
  /**
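
The declaration changes make the breaking surface explicit: every factory now takes only a model id, and `GoogleVertexSettings` / `GoogleVertexImageSettings` are gone, with the image model's `maxImagesPerCall` fixed at 4 so larger `n` values are batched by the SDK core. A sketch of the new call sites, assuming the `experimental_generateImage` helper from `ai`; the model ids and option values are illustrative:

  import { experimental_generateImage as generateImage } from 'ai';
  import { vertex } from '@ai-sdk/google-vertex';

  // Language and image models are created from an id only:
  const chat = vertex('gemini-2.0-flash-001');            // LanguageModelV2
  const imagen = vertex.image('imagen-3.0-generate-002'); // ImageModelV2, maxImagesPerCall = 4

  // Per-request knobs move to providerOptions, keyed 'vertex' for Imagen:
  const { images } = await generateImage({
    model: imagen,
    prompt: 'A watercolor lighthouse at dusk',
    n: 2,
    aspectRatio: '16:9',
    providerOptions: {
      vertex: { addWatermark: false, personGeneration: 'dont_allow' },
    },
  });
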
package/dist/index.d.ts CHANGED
@@ -1,16 +1,9 @@
  import { z } from 'zod';
  import { GoogleAuthOptions } from 'google-auth-library';
- import { ProviderV1, LanguageModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
- import { InternalGoogleGenerativeAISettings } from '@ai-sdk/google/internal';

  type GoogleVertexImageModelId = 'imagen-3.0-generate-001' | 'imagen-3.0-generate-002' | 'imagen-3.0-fast-generate-001' | (string & {});
- interface GoogleVertexImageSettings {
- /**
- Override the maximum number of images per call (default 4)
- */
- maxImagesPerCall?: number;
- }

  declare const vertexImageProviderOptionsSchema: z.ZodObject<{
  negativePrompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
@@ -34,23 +27,21 @@ declare const vertexImageProviderOptionsSchema: z.ZodObject<{
  type GoogleVertexImageProviderOptions = z.infer<typeof vertexImageProviderOptionsSchema>;

  type GoogleVertexModelId = 'gemini-2.0-flash-001' | 'gemini-1.5-flash' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-pro' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-1.0-pro-001' | 'gemini-1.0-pro-vision-001' | 'gemini-1.0-pro' | 'gemini-1.0-pro-001' | 'gemini-1.0-pro-002' | 'gemini-2.0-flash-lite-preview-02-05' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-exp' | (string & {});
- interface GoogleVertexSettings extends InternalGoogleGenerativeAISettings {
- }

- interface GoogleVertexProvider extends ProviderV1 {
+ interface GoogleVertexProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings): LanguageModelV1;
- languageModel: (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings) => LanguageModelV1;
+ (modelId: GoogleVertexModelId): LanguageModelV2;
+ languageModel: (modelId: GoogleVertexModelId) => LanguageModelV2;
  /**
  * Creates a model for image generation.
  */
- image(modelId: GoogleVertexImageModelId, settings?: GoogleVertexImageSettings): ImageModelV1;
+ image(modelId: GoogleVertexImageModelId): ImageModelV2;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: GoogleVertexImageModelId, settings?: GoogleVertexImageSettings): ImageModelV1;
+ imageModel(modelId: GoogleVertexImageModelId): ImageModelV2;
  }
  interface GoogleVertexProviderSettings$1 {
  /**
package/dist/index.js CHANGED
@@ -50,12 +50,13 @@ async function generateAuthToken(options) {
  }

  // src/google-vertex-provider.ts
+ var import_internal = require("@ai-sdk/google/internal");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");

  // src/google-vertex-embedding-model.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");

  // src/google-vertex-error.ts
  var import_provider_utils = require("@ai-sdk/provider-utils");
@@ -74,28 +75,40 @@ var googleVertexFailedResponseHandler = (0, import_provider_utils.createJsonErro
  }
  );

+ // src/google-vertex-embedding-options.ts
+ var import_zod2 = require("zod");
+ var googleVertexEmbeddingProviderOptions = import_zod2.z.object({
+ /**
+ * Optional. Optional reduced dimension for the output embedding.
+ * If set, excessive values in the output embedding are truncated from the end.
+ */
+ outputDimensionality: import_zod2.z.number().optional()
+ });
+
  // src/google-vertex-embedding-model.ts
  var GoogleVertexEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- return 2048;
- }
- get supportsParallelCalls() {
- return true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
+ const googleOptions = (_a = await (0, import_provider_utils2.parseProviderOptions)({
+ provider: "google",
+ providerOptions,
+ schema: googleVertexEmbeddingProviderOptions
+ })) != null ? _a : {};
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new import_provider.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -109,13 +122,17 @@ var GoogleVertexEmbeddingModel = class {
  headers
  );
  const url = `${this.config.baseURL}/models/${this.modelId}:predict`;
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils2.postJsonToApi)({
  url,
  headers: mergedHeaders,
  body: {
  instances: values.map((value) => ({ content: value })),
  parameters: {
- outputDimensionality: this.settings.outputDimensionality
+ outputDimensionality: googleOptions.outputDimensionality
  }
  },
  failedResponseHandler: googleVertexFailedResponseHandler,
@@ -135,43 +152,37 @@ var GoogleVertexEmbeddingModel = class {
  0
  )
  },
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var googleVertexTextEmbeddingResponseSchema = import_zod2.z.object({
- predictions: import_zod2.z.array(
- import_zod2.z.object({
- embeddings: import_zod2.z.object({
- values: import_zod2.z.array(import_zod2.z.number()),
- statistics: import_zod2.z.object({
- token_count: import_zod2.z.number()
+ var googleVertexTextEmbeddingResponseSchema = import_zod3.z.object({
+ predictions: import_zod3.z.array(
+ import_zod3.z.object({
+ embeddings: import_zod3.z.object({
+ values: import_zod3.z.array(import_zod3.z.number()),
+ statistics: import_zod3.z.object({
+ token_count: import_zod3.z.number()
  })
  })
  })
  )
  });

- // src/google-vertex-provider.ts
- var import_internal = require("@ai-sdk/google/internal");
-
  // src/google-vertex-image-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod4 = require("zod");
  var GoogleVertexImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#parameter_list
+ this.maxImagesPerCall = 4;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
- }
  async doGenerate({
  prompt,
  n,
@@ -191,7 +202,7 @@ var GoogleVertexImageModel = class {
  details: "This model does not support the `size` option. Use `aspectRatio` instead."
  });
  }
- const vertexImageOptions = (0, import_provider_utils3.parseProviderOptions)({
+ const vertexImageOptions = await (0, import_provider_utils3.parseProviderOptions)({
  provider: "vertex",
  providerOptions,
  schema: vertexImageProviderOptionsSchema
@@ -230,27 +241,22 @@ var GoogleVertexImageModel = class {
  };
  }
  };
- var vertexImageResponseSchema = import_zod3.z.object({
- predictions: import_zod3.z.array(import_zod3.z.object({ bytesBase64Encoded: import_zod3.z.string() })).nullish()
+ var vertexImageResponseSchema = import_zod4.z.object({
+ predictions: import_zod4.z.array(import_zod4.z.object({ bytesBase64Encoded: import_zod4.z.string() })).nullish()
  });
- var vertexImageProviderOptionsSchema = import_zod3.z.object({
- negativePrompt: import_zod3.z.string().nullish(),
- personGeneration: import_zod3.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
- safetySetting: import_zod3.z.enum([
+ var vertexImageProviderOptionsSchema = import_zod4.z.object({
+ negativePrompt: import_zod4.z.string().nullish(),
+ personGeneration: import_zod4.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+ safetySetting: import_zod4.z.enum([
  "block_low_and_above",
  "block_medium_and_above",
  "block_only_high",
  "block_none"
  ]).nullish(),
- addWatermark: import_zod3.z.boolean().nullish(),
- storageUri: import_zod3.z.string().nullish()
+ addWatermark: import_zod4.z.boolean().nullish(),
+ storageUri: import_zod4.z.string().nullish()
  });

- // src/google-vertex-supported-file-url.ts
- function isSupportedFileUrl(url) {
- return ["http:", "https:", "gs:"].includes(url.protocol);
- }
-
  // src/google-vertex-provider.ts
  function createVertex(options = {}) {
  const loadVertexProject = () => (0, import_provider_utils4.loadSetting)({
@@ -280,27 +286,30 @@ function createVertex(options = {}) {
  baseURL: loadBaseURL()
  };
  };
- const createChatModel = (modelId, settings = {}) => {
+ const createChatModel = (modelId) => {
  var _a;
- return new import_internal.GoogleGenerativeAILanguageModel(modelId, settings, {
+ return new import_internal.GoogleGenerativeAILanguageModel(modelId, {
  ...createConfig("chat"),
  generateId: (_a = options.generateId) != null ? _a : import_provider_utils4.generateId,
- isSupportedUrl: isSupportedFileUrl
+ supportedUrls: () => ({
+ "*": [
+ // HTTP URLs:
+ /^https?:\/\/.*$/,
+ // Google Cloud Storage URLs:
+ /^gs:\/\/.*$/
+ ]
+ })
  });
  };
- const createEmbeddingModel = (modelId, settings = {}) => new GoogleVertexEmbeddingModel(
- modelId,
- settings,
- createConfig("embedding")
- );
- const createImageModel = (modelId, settings = {}) => new GoogleVertexImageModel(modelId, settings, createConfig("image"));
- const provider = function(modelId, settings) {
+ const createEmbeddingModel = (modelId) => new GoogleVertexEmbeddingModel(modelId, createConfig("embedding"));
+ const createImageModel = (modelId) => new GoogleVertexImageModel(modelId, createConfig("image"));
+ const provider = function(modelId) {
  if (new.target) {
  throw new Error(
  "The Google Vertex AI model function cannot be called with the new keyword."
  );
  }
- return createChatModel(modelId, settings);
+ return createChatModel(modelId);
  };
  provider.languageModel = createChatModel;
  provider.textEmbeddingModel = createEmbeddingModel;
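
A functional consequence of the `isSupportedUrl` → `supportedUrls` change above is that https:// and gs:// file URLs are declared as passthrough for every media type, so the SDK forwards them to Vertex instead of downloading them first. A minimal sketch, assuming an AI SDK 5-style `generateText` call with a file part; the bucket path is made up and the `mediaType` field name follows the v5 alpha message shape, so treat both as assumptions:

  import { generateText } from 'ai';
  import { vertex } from '@ai-sdk/google-vertex';

  const { text } = await generateText({
    model: vertex('gemini-2.0-flash-001'),
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Summarize this report.' },
          // Matches /^gs:\/\/.*$/ in supportedUrls, so the URL is passed through to Vertex
          { type: 'file', data: new URL('gs://my-bucket/report.pdf'), mediaType: 'application/pdf' },
        ],
      },
    ],
  });
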