@ai-sdk/google-vertex 5.0.0-beta.5 → 5.0.0-beta.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +430 -8
  2. package/README.md +65 -1
  3. package/dist/anthropic/edge/index.d.ts +61 -16
  4. package/dist/anthropic/edge/index.js +67 -60
  5. package/dist/anthropic/edge/index.js.map +1 -1
  6. package/dist/anthropic/index.d.ts +61 -16
  7. package/dist/anthropic/index.js +57 -55
  8. package/dist/anthropic/index.js.map +1 -1
  9. package/dist/edge/index.d.ts +31 -22
  10. package/dist/edge/index.js +217 -176
  11. package/dist/edge/index.js.map +1 -1
  12. package/dist/index.d.ts +31 -22
  13. package/dist/index.js +208 -173
  14. package/dist/index.js.map +1 -1
  15. package/dist/maas/edge/index.d.ts +76 -0
  16. package/dist/maas/edge/index.js +196 -0
  17. package/dist/maas/edge/index.js.map +1 -0
  18. package/dist/maas/index.d.ts +60 -0
  19. package/dist/maas/index.js +101 -0
  20. package/dist/maas/index.js.map +1 -0
  21. package/docs/16-google-vertex.mdx +226 -6
  22. package/maas/edge.d.ts +1 -0
  23. package/maas/index.d.ts +1 -0
  24. package/package.json +29 -18
  25. package/src/anthropic/edge/google-vertex-anthropic-provider-edge.ts +1 -2
  26. package/src/anthropic/google-vertex-anthropic-messages-options.ts +1 -0
  27. package/src/anthropic/google-vertex-anthropic-provider-node.ts +1 -2
  28. package/src/anthropic/google-vertex-anthropic-provider.ts +33 -8
  29. package/src/edge/google-vertex-provider-edge.ts +1 -2
  30. package/src/google-vertex-config.ts +1 -1
  31. package/src/google-vertex-embedding-model.ts +23 -6
  32. package/src/google-vertex-embedding-options.ts +2 -0
  33. package/src/google-vertex-image-model.ts +38 -18
  34. package/src/google-vertex-options.ts +0 -1
  35. package/src/google-vertex-provider-node.ts +1 -2
  36. package/src/google-vertex-provider.ts +12 -12
  37. package/src/google-vertex-video-model.ts +7 -7
  38. package/src/maas/edge/google-vertex-maas-provider-edge.ts +65 -0
  39. package/src/maas/edge/index.ts +9 -0
  40. package/src/maas/google-vertex-maas-options.ts +15 -0
  41. package/src/maas/google-vertex-maas-provider-node.ts +64 -0
  42. package/src/maas/google-vertex-maas-provider.ts +111 -0
  43. package/src/maas/index.ts +9 -0
  44. package/dist/anthropic/edge/index.d.mts +0 -231
  45. package/dist/anthropic/edge/index.mjs +0 -259
  46. package/dist/anthropic/edge/index.mjs.map +0 -1
  47. package/dist/anthropic/index.d.mts +0 -215
  48. package/dist/anthropic/index.mjs +0 -164
  49. package/dist/anthropic/index.mjs.map +0 -1
  50. package/dist/edge/index.d.mts +0 -160
  51. package/dist/edge/index.mjs +0 -1049
  52. package/dist/edge/index.mjs.map +0 -1
  53. package/dist/index.d.mts +0 -219
  54. package/dist/index.mjs +0 -960
  55. package/dist/index.mjs.map +0 -1
@@ -1,7 +1,7 @@
1
1
  import {
2
- LanguageModelV3,
2
+ LanguageModelV4,
3
3
  NoSuchModelError,
4
- ProviderV3,
4
+ ProviderV4,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  FetchFunction,
@@ -78,24 +78,47 @@ export const vertexAnthropicTools = {
78
78
  * Creates a web search tool that gives Claude direct access to real-time web content.
79
79
  */
80
80
  webSearch_20250305: anthropicTools.webSearch_20250305,
81
+
82
+ /**
83
+ * Creates a tool search tool that uses regex patterns to find tools.
84
+ *
85
+ * The tool search tool enables Claude to work with hundreds or thousands of tools
86
+ * by dynamically discovering and loading them on-demand.
87
+ *
88
+ * Use `providerOptions: { anthropic: { deferLoading: true } }` on other tools
89
+ * to mark them for deferred loading.
90
+ */
91
+ toolSearchRegex_20251119: anthropicTools.toolSearchRegex_20251119,
92
+
93
+ /**
94
+ * Creates a tool search tool that uses BM25 (natural language) to find tools.
95
+ *
96
+ * The tool search tool enables Claude to work with hundreds or thousands of tools
97
+ * by dynamically discovering and loading them on-demand.
98
+ *
99
+ * Use `providerOptions: { anthropic: { deferLoading: true } }` on other tools
100
+ * to mark them for deferred loading.
101
+ */
102
+ toolSearchBm25_20251119: anthropicTools.toolSearchBm25_20251119,
81
103
  };
82
- export interface GoogleVertexAnthropicProvider extends ProviderV3 {
104
+ export interface GoogleVertexAnthropicProvider extends ProviderV4 {
83
105
  /**
84
106
  * Creates a model for text generation.
85
107
  */
86
- (modelId: GoogleVertexAnthropicMessagesModelId): LanguageModelV3;
108
+ (modelId: GoogleVertexAnthropicMessagesModelId): LanguageModelV4;
87
109
 
88
110
  /**
89
111
  * Creates a model for text generation.
90
112
  */
91
- languageModel(modelId: GoogleVertexAnthropicMessagesModelId): LanguageModelV3;
113
+ languageModel(modelId: GoogleVertexAnthropicMessagesModelId): LanguageModelV4;
92
114
 
93
115
  /**
94
116
  * Anthropic tools supported by Google Vertex.
95
117
  * Note: Only a subset of Anthropic tools are available on Vertex.
96
118
  * Supported tools: bash_20241022, bash_20250124, textEditor_20241022,
97
119
  * textEditor_20250124, textEditor_20250429, textEditor_20250728,
98
- * computer_20241022, webSearch_20250305
120
+ * computer_20241022, webSearch_20250305, toolSearchRegex_20251119,
121
+ * toolSearchBm25_20251119
99
122
  */
100
123
  tools: typeof vertexAnthropicTools;
101
124
 
@@ -169,7 +192,7 @@ export function createVertexAnthropic(
169
192
  }`,
170
193
  transformRequestBody: args => {
171
194
  // Remove model from args and add anthropic version
172
- const { model, ...rest } = args;
195
+ const { model: _model, ...rest } = args;
173
196
  return {
174
197
  ...rest,
175
198
  anthropic_version: 'vertex-2023-10-16',
@@ -179,6 +202,8 @@ export function createVertexAnthropic(
179
202
  supportedUrls: () => ({}),
180
203
  // force the use of JSON tool fallback for structured outputs since beta header isn't supported
181
204
  supportsNativeStructuredOutput: false,
205
+ // Vertex Anthropic doesn't support strict mode on tool definitions.
206
+ supportsStrictTools: false,
182
207
  });
183
208
 
184
209
  const provider = function (modelId: GoogleVertexAnthropicMessagesModelId) {
@@ -191,7 +216,7 @@ export function createVertexAnthropic(
191
216
  return createChatModel(modelId);
192
217
  };
193
218
 
194
- provider.specificationVersion = 'v3' as const;
219
+ provider.specificationVersion = 'v4' as const;
195
220
  provider.languageModel = createChatModel;
196
221
  provider.chat = createChatModel;
197
222
  provider.messages = createChatModel;
@@ -11,8 +11,7 @@ import {
11
11
 
12
12
  export type { GoogleVertexProvider };
13
13
 
14
- export interface GoogleVertexProviderSettings
15
- extends GoogleVertexProviderSettingsOriginal {
14
+ export interface GoogleVertexProviderSettings extends GoogleVertexProviderSettingsOriginal {
16
15
  /**
17
16
  * Optional. The Google credentials for the Google Cloud service account. If
18
17
  * not provided, the Google Vertex provider will use environment variables to
@@ -3,6 +3,6 @@ import { FetchFunction, Resolvable } from '@ai-sdk/provider-utils';
3
3
  export interface GoogleVertexConfig {
4
4
  provider: string;
5
5
  baseURL: string;
6
- headers: Resolvable<Record<string, string | undefined>>;
6
+ headers?: Resolvable<Record<string, string | undefined>>;
7
7
  fetch?: FetchFunction;
8
8
  }
@@ -1,5 +1,5 @@
1
1
  import {
2
- EmbeddingModelV3,
2
+ EmbeddingModelV4,
3
3
  TooManyEmbeddingValuesForCallError,
4
4
  } from '@ai-sdk/provider';
5
5
  import {
@@ -8,6 +8,9 @@ import {
8
8
  postJsonToApi,
9
9
  resolve,
10
10
  parseProviderOptions,
11
+ serializeModelOptions,
12
+ WORKFLOW_SERIALIZE,
13
+ WORKFLOW_DESERIALIZE,
11
14
  } from '@ai-sdk/provider-utils';
12
15
  import { z } from 'zod/v4';
13
16
  import { googleVertexFailedResponseHandler } from './google-vertex-error';
@@ -17,14 +20,28 @@ import {
17
20
  } from './google-vertex-embedding-options';
18
21
  import { GoogleVertexConfig } from './google-vertex-config';
19
22
 
20
- export class GoogleVertexEmbeddingModel implements EmbeddingModelV3 {
21
- readonly specificationVersion = 'v3';
23
+ export class GoogleVertexEmbeddingModel implements EmbeddingModelV4 {
24
+ readonly specificationVersion = 'v4';
22
25
  readonly modelId: GoogleVertexEmbeddingModelId;
23
26
  readonly maxEmbeddingsPerCall = 2048;
24
27
  readonly supportsParallelCalls = true;
25
28
 
26
29
  private readonly config: GoogleVertexConfig;
27
30
 
31
+ static [WORKFLOW_SERIALIZE](model: GoogleVertexEmbeddingModel) {
32
+ return serializeModelOptions({
33
+ modelId: model.modelId,
34
+ config: model.config,
35
+ });
36
+ }
37
+
38
+ static [WORKFLOW_DESERIALIZE](options: {
39
+ modelId: string;
40
+ config: GoogleVertexConfig;
41
+ }) {
42
+ return new GoogleVertexEmbeddingModel(options.modelId, options.config);
43
+ }
44
+
28
45
  get provider(): string {
29
46
  return this.config.provider;
30
47
  }
@@ -42,8 +59,8 @@ export class GoogleVertexEmbeddingModel implements EmbeddingModelV3 {
42
59
  headers,
43
60
  abortSignal,
44
61
  providerOptions,
45
- }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
46
- Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
62
+ }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
63
+ Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
47
64
  > {
48
65
  let googleOptions = await parseProviderOptions({
49
66
  provider: 'vertex',
@@ -71,7 +88,7 @@ export class GoogleVertexEmbeddingModel implements EmbeddingModelV3 {
71
88
  }
72
89
 
73
90
  const mergedHeaders = combineHeaders(
74
- await resolve(this.config.headers),
91
+ this.config.headers ? await resolve(this.config.headers) : undefined,
75
92
  headers,
76
93
  );
77
94
 
@@ -10,6 +10,8 @@ export type GoogleVertexEmbeddingModelId =
10
10
  | 'text-multilingual-embedding-002'
11
11
  | 'text-embedding-004'
12
12
  | 'text-embedding-005'
13
+ | 'gemini-embedding-001'
14
+ | 'gemini-embedding-2-preview'
13
15
  | (string & {});
14
16
 
15
17
  export const googleVertexEmbeddingModelOptions = z.object({
@@ -1,10 +1,10 @@
1
1
  import type { GoogleLanguageModelOptions } from '@ai-sdk/google';
2
2
  import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal';
3
3
  import {
4
- ImageModelV3,
5
- ImageModelV3File,
6
- LanguageModelV3Prompt,
7
- SharedV3Warning,
4
+ ImageModelV4,
5
+ ImageModelV4File,
6
+ LanguageModelV4Prompt,
7
+ SharedV4Warning,
8
8
  } from '@ai-sdk/provider';
9
9
  import {
10
10
  Resolvable,
@@ -16,6 +16,9 @@ import {
16
16
  parseProviderOptions,
17
17
  postJsonToApi,
18
18
  resolve,
19
+ serializeModelOptions,
20
+ WORKFLOW_SERIALIZE,
21
+ WORKFLOW_DESERIALIZE,
19
22
  } from '@ai-sdk/provider-utils';
20
23
  import { z } from 'zod/v4';
21
24
  import { googleVertexFailedResponseHandler } from './google-vertex-error';
@@ -33,8 +36,22 @@ interface GoogleVertexImageModelConfig {
33
36
  }
34
37
 
35
38
  // https://cloud.google.com/vertex-ai/generative-ai/docs/image/generate-images
36
- export class GoogleVertexImageModel implements ImageModelV3 {
37
- readonly specificationVersion = 'v3';
39
+ export class GoogleVertexImageModel implements ImageModelV4 {
40
+ readonly specificationVersion = 'v4';
41
+
42
+ static [WORKFLOW_SERIALIZE](model: GoogleVertexImageModel) {
43
+ return serializeModelOptions({
44
+ modelId: model.modelId,
45
+ config: model.config,
46
+ });
47
+ }
48
+
49
+ static [WORKFLOW_DESERIALIZE](options: {
50
+ modelId: string;
51
+ config: GoogleVertexImageModelConfig;
52
+ }) {
53
+ return new GoogleVertexImageModel(options.modelId, options.config);
54
+ }
38
55
 
39
56
  get maxImagesPerCall(): number {
40
57
  if (isGeminiModel(this.modelId)) {
@@ -53,8 +70,8 @@ export class GoogleVertexImageModel implements ImageModelV3 {
53
70
  ) {}
54
71
 
55
72
  async doGenerate(
56
- options: Parameters<ImageModelV3['doGenerate']>[0],
57
- ): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>> {
73
+ options: Parameters<ImageModelV4['doGenerate']>[0],
74
+ ): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>> {
58
75
  if (isGeminiModel(this.modelId)) {
59
76
  return this.doGenerateGemini(options);
60
77
  }
@@ -72,10 +89,10 @@ export class GoogleVertexImageModel implements ImageModelV3 {
72
89
  abortSignal,
73
90
  files,
74
91
  mask,
75
- }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
76
- Awaited<ReturnType<ImageModelV3['doGenerate']>>
92
+ }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<
93
+ Awaited<ReturnType<ImageModelV4['doGenerate']>>
77
94
  > {
78
- const warnings: Array<SharedV3Warning> = [];
95
+ const warnings: Array<SharedV4Warning> = [];
79
96
 
80
97
  if (size != null) {
81
98
  warnings.push({
@@ -164,7 +181,10 @@ export class GoogleVertexImageModel implements ImageModelV3 {
164
181
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
165
182
  const { value: response, responseHeaders } = await postJsonToApi({
166
183
  url: `${this.config.baseURL}/models/${this.modelId}:predict`,
167
- headers: combineHeaders(await resolve(this.config.headers), headers),
184
+ headers: combineHeaders(
185
+ this.config.headers ? await resolve(this.config.headers) : undefined,
186
+ headers,
187
+ ),
168
188
  body,
169
189
  failedResponseHandler: googleVertexFailedResponseHandler,
170
190
  successfulResponseHandler: createJsonResponseHandler(
@@ -212,10 +232,10 @@ export class GoogleVertexImageModel implements ImageModelV3 {
212
232
  abortSignal,
213
233
  files,
214
234
  mask,
215
- }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
216
- Awaited<ReturnType<ImageModelV3['doGenerate']>>
235
+ }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<
236
+ Awaited<ReturnType<ImageModelV4['doGenerate']>>
217
237
  > {
218
- const warnings: Array<SharedV3Warning> = [];
238
+ const warnings: Array<SharedV4Warning> = [];
219
239
 
220
240
  if (mask != null) {
221
241
  throw new Error(
@@ -268,7 +288,7 @@ export class GoogleVertexImageModel implements ImageModelV3 {
268
288
  }
269
289
  }
270
290
 
271
- const languageModelPrompt: LanguageModelV3Prompt = [
291
+ const languageModelPrompt: LanguageModelV4Prompt = [
272
292
  { role: 'user', content: userContent },
273
293
  ];
274
294
 
@@ -433,9 +453,9 @@ export type GoogleVertexImageModelOptions = z.infer<
433
453
  >;
434
454
 
435
455
  /**
436
- * Helper to convert ImageModelV3File data to base64 string
456
+ * Helper to convert ImageModelV4File data to base64 string
437
457
  */
438
- function getBase64Data(file: ImageModelV3File): string {
458
+ function getBase64Data(file: ImageModelV4File): string {
439
459
  if (file.type === 'url') {
440
460
  throw new Error(
441
461
  'URL-based images are not supported for Google Vertex image editing. Please provide the image data directly.',
@@ -21,7 +21,6 @@ export type GoogleVertexModelId =
21
21
  | 'gemini-1.0-pro-002'
22
22
  // Preview models
23
23
  | 'gemini-2.0-flash-lite-preview-02-05'
24
- | 'gemini-2.5-flash-lite-preview-09-2025'
25
24
  | 'gemini-2.5-flash-preview-09-2025'
26
25
  | 'gemini-3-pro-preview'
27
26
  | 'gemini-3-pro-image-preview'
@@ -7,8 +7,7 @@ import {
7
7
  GoogleVertexProviderSettings as GoogleVertexProviderSettingsOriginal,
8
8
  } from './google-vertex-provider';
9
9
 
10
- export interface GoogleVertexProviderSettings
11
- extends GoogleVertexProviderSettingsOriginal {
10
+ export interface GoogleVertexProviderSettings extends GoogleVertexProviderSettingsOriginal {
12
11
  /**
13
12
  * Optional. The Authentication options provided by google-auth-library.
14
13
  * Complete list of authentication options is documented in the
@@ -1,9 +1,9 @@
1
1
  import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal';
2
2
  import {
3
- Experimental_VideoModelV3,
4
- ImageModelV3,
5
- LanguageModelV3,
6
- ProviderV3,
3
+ Experimental_VideoModelV4,
4
+ ImageModelV4,
5
+ LanguageModelV4,
6
+ ProviderV4,
7
7
  } from '@ai-sdk/provider';
8
8
  import {
9
9
  FetchFunction,
@@ -47,23 +47,23 @@ function createExpressModeFetch(
47
47
  };
48
48
  }
49
49
 
50
- export interface GoogleVertexProvider extends ProviderV3 {
50
+ export interface GoogleVertexProvider extends ProviderV4 {
51
51
  /**
52
52
  * Creates a model for text generation.
53
53
  */
54
- (modelId: GoogleVertexModelId): LanguageModelV3;
54
+ (modelId: GoogleVertexModelId): LanguageModelV4;
55
55
 
56
- languageModel: (modelId: GoogleVertexModelId) => LanguageModelV3;
56
+ languageModel: (modelId: GoogleVertexModelId) => LanguageModelV4;
57
57
 
58
58
  /**
59
59
  * Creates a model for image generation.
60
60
  */
61
- image(modelId: GoogleVertexImageModelId): ImageModelV3;
61
+ image(modelId: GoogleVertexImageModelId): ImageModelV4;
62
62
 
63
63
  /**
64
64
  * Creates a model for image generation.
65
65
  */
66
- imageModel(modelId: GoogleVertexImageModelId): ImageModelV3;
66
+ imageModel(modelId: GoogleVertexImageModelId): ImageModelV4;
67
67
 
68
68
  tools: typeof googleVertexTools;
69
69
 
@@ -77,12 +77,12 @@ export interface GoogleVertexProvider extends ProviderV3 {
77
77
  /**
78
78
  * Creates a model for video generation.
79
79
  */
80
- video(modelId: GoogleVertexVideoModelId): Experimental_VideoModelV3;
80
+ video(modelId: GoogleVertexVideoModelId): Experimental_VideoModelV4;
81
81
 
82
82
  /**
83
83
  * Creates a model for video generation.
84
84
  */
85
- videoModel(modelId: GoogleVertexVideoModelId): Experimental_VideoModelV3;
85
+ videoModel(modelId: GoogleVertexVideoModelId): Experimental_VideoModelV4;
86
86
  }
87
87
 
88
88
  export interface GoogleVertexProviderSettings {
@@ -231,7 +231,7 @@ export function createVertex(
231
231
  return createChatModel(modelId);
232
232
  };
233
233
 
234
- provider.specificationVersion = 'v3' as const;
234
+ provider.specificationVersion = 'v4' as const;
235
235
  provider.languageModel = createChatModel;
236
236
  provider.embeddingModel = createEmbeddingModel;
237
237
  provider.textEmbeddingModel = createEmbeddingModel;
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  AISDKError,
3
- type Experimental_VideoModelV3,
4
- type SharedV3Warning,
3
+ type Experimental_VideoModelV4,
4
+ type SharedV4Warning,
5
5
  } from '@ai-sdk/provider';
6
6
  import {
7
7
  combineHeaders,
@@ -53,8 +53,8 @@ interface GoogleVertexVideoModelConfig {
53
53
  };
54
54
  }
55
55
 
56
- export class GoogleVertexVideoModel implements Experimental_VideoModelV3 {
57
- readonly specificationVersion = 'v3';
56
+ export class GoogleVertexVideoModel implements Experimental_VideoModelV4 {
57
+ readonly specificationVersion = 'v4';
58
58
 
59
59
  get provider(): string {
60
60
  return this.config.provider;
@@ -71,10 +71,10 @@ export class GoogleVertexVideoModel implements Experimental_VideoModelV3 {
71
71
  ) {}
72
72
 
73
73
  async doGenerate(
74
- options: Parameters<Experimental_VideoModelV3['doGenerate']>[0],
75
- ): Promise<Awaited<ReturnType<Experimental_VideoModelV3['doGenerate']>>> {
74
+ options: Parameters<Experimental_VideoModelV4['doGenerate']>[0],
75
+ ): Promise<Awaited<ReturnType<Experimental_VideoModelV4['doGenerate']>>> {
76
76
  const currentDate = this.config._internal?.currentDate?.() ?? new Date();
77
- const warnings: SharedV3Warning[] = [];
77
+ const warnings: SharedV4Warning[] = [];
78
78
 
79
79
  const vertexOptions = (await parseProviderOptions({
80
80
  provider: 'vertex',
@@ -0,0 +1,65 @@
1
+ import { FetchFunction, resolve } from '@ai-sdk/provider-utils';
2
+ import {
3
+ generateAuthToken,
4
+ GoogleCredentials,
5
+ } from '../../edge/google-vertex-auth-edge';
6
+ import {
7
+ createVertexMaas as createVertexMaasOriginal,
8
+ GoogleVertexMaasProvider,
9
+ GoogleVertexMaasProviderSettings as GoogleVertexMaasProviderSettingsOriginal,
10
+ } from '../google-vertex-maas-provider';
11
+
12
+ export type { GoogleVertexMaasProvider };
13
+
14
+ export interface GoogleVertexMaasProviderSettings extends GoogleVertexMaasProviderSettingsOriginal {
15
+ /**
16
+ * Optional. The Google credentials for the Google Cloud service account. If
17
+ * not provided, the Google Vertex provider will use environment variables to
18
+ * load the credentials.
19
+ */
20
+ googleCredentials?: GoogleCredentials;
21
+ }
22
+
23
+ /**
24
+ * Create a Google Vertex AI MaaS (Model as a Service) provider instance for Edge runtimes.
25
+ * Uses the OpenAI-compatible Chat Completions API for partner and open models.
26
+ * Automatically handles Google Cloud authentication.
27
+ *
28
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/maas/use-open-models
29
+ */
30
+ export function createVertexMaas(
31
+ options: GoogleVertexMaasProviderSettings = {},
32
+ ): GoogleVertexMaasProvider {
33
+ // Create a custom fetch wrapper that adds auth headers
34
+ const customFetch: FetchFunction = async (url, init) => {
35
+ const token = await generateAuthToken(options.googleCredentials);
36
+ const resolvedHeaders = await resolve(options.headers);
37
+ const authHeaders = {
38
+ ...resolvedHeaders,
39
+ Authorization: `Bearer ${token}`,
40
+ };
41
+
42
+ // Merge auth headers with existing headers from init
43
+ const fetchInit = {
44
+ ...init,
45
+ headers: {
46
+ ...init?.headers,
47
+ ...authHeaders,
48
+ },
49
+ };
50
+
51
+ // Call the original fetch or user's custom fetch
52
+ return (options.fetch ?? fetch)(url, fetchInit);
53
+ };
54
+
55
+ return createVertexMaasOriginal({
56
+ ...options,
57
+ fetch: customFetch,
58
+ headers: undefined, // Don't pass headers, we handle them in fetch
59
+ });
60
+ }
61
+
62
+ /**
63
+ * Default Google Vertex AI MaaS provider instance for Edge runtimes.
64
+ */
65
+ export const vertexMaas = createVertexMaas();
@@ -0,0 +1,9 @@
1
+ export {
2
+ createVertexMaas,
3
+ vertexMaas,
4
+ } from './google-vertex-maas-provider-edge';
5
+ export type {
6
+ GoogleVertexMaasProvider,
7
+ GoogleVertexMaasProviderSettings,
8
+ } from './google-vertex-maas-provider-edge';
9
+ export type { GoogleVertexMaasModelId } from '../google-vertex-maas-options';
@@ -0,0 +1,15 @@
1
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/maas/use-open-models
2
+ export type GoogleVertexMaasModelId =
3
+ | 'deepseek-ai/deepseek-r1-0528-maas'
4
+ | 'deepseek-ai/deepseek-v3.1-maas'
5
+ | 'deepseek-ai/deepseek-v3.2-maas'
6
+ | 'openai/gpt-oss-120b-maas'
7
+ | 'openai/gpt-oss-20b-maas'
8
+ | 'meta/llama-4-maverick-17b-128e-instruct-maas'
9
+ | 'meta/llama-4-scout-17b-16e-instruct-maas'
10
+ | 'minimax/minimax-m2-maas'
11
+ | 'qwen/qwen3-coder-480b-a35b-instruct-maas'
12
+ | 'qwen/qwen3-next-80b-a3b-instruct-maas'
13
+ | 'qwen/qwen3-next-80b-a3b-thinking-maas'
14
+ | 'moonshotai/kimi-k2-thinking-maas'
15
+ | (string & {});
@@ -0,0 +1,64 @@
1
+ import { FetchFunction, resolve } from '@ai-sdk/provider-utils';
2
+ import { GoogleAuthOptions } from 'google-auth-library';
3
+ import { generateAuthToken } from '../google-vertex-auth-google-auth-library';
4
+ import {
5
+ createVertexMaas as createVertexMaasOriginal,
6
+ GoogleVertexMaasProvider,
7
+ GoogleVertexMaasProviderSettings as GoogleVertexMaasProviderSettingsOriginal,
8
+ } from './google-vertex-maas-provider';
9
+
10
+ export type { GoogleVertexMaasProvider };
11
+
12
+ export interface GoogleVertexMaasProviderSettings extends GoogleVertexMaasProviderSettingsOriginal {
13
+ /**
14
+ * Optional. The Authentication options provided by google-auth-library.
15
+ * Complete list of authentication options is documented in the
16
+ * GoogleAuthOptions interface:
17
+ * https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts.
18
+ */
19
+ googleAuthOptions?: GoogleAuthOptions;
20
+ }
21
+
22
+ /**
23
+ * Create a Google Vertex AI MaaS (Model as a Service) provider instance for Node.js.
24
+ * Uses the OpenAI-compatible Chat Completions API for partner and open models.
25
+ * Automatically handles Google Cloud authentication.
26
+ *
27
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/maas/use-open-models
28
+ */
29
+ export function createVertexMaas(
30
+ options: GoogleVertexMaasProviderSettings = {},
31
+ ): GoogleVertexMaasProvider {
32
+ // Create a custom fetch wrapper that adds auth headers
33
+ const customFetch: FetchFunction = async (url, init) => {
34
+ const token = await generateAuthToken(options.googleAuthOptions);
35
+ const resolvedHeaders = await resolve(options.headers);
36
+ const authHeaders = {
37
+ ...resolvedHeaders,
38
+ Authorization: `Bearer ${token}`,
39
+ };
40
+
41
+ // Merge auth headers with existing headers from init
42
+ const fetchInit = {
43
+ ...init,
44
+ headers: {
45
+ ...init?.headers,
46
+ ...authHeaders,
47
+ },
48
+ };
49
+
50
+ // Call the original fetch or user's custom fetch
51
+ return (options.fetch ?? fetch)(url, fetchInit);
52
+ };
53
+
54
+ return createVertexMaasOriginal({
55
+ ...options,
56
+ fetch: customFetch,
57
+ headers: undefined, // Don't pass headers, we handle them in fetch
58
+ });
59
+ }
60
+
61
+ /**
62
+ * Default Google Vertex AI MaaS provider instance for Node.js.
63
+ */
64
+ export const vertexMaas = createVertexMaas();