@ai-sdk/google-vertex 4.0.23 → 4.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/anthropic/edge/index.js +1 -1
- package/dist/anthropic/edge/index.mjs +1 -1
- package/dist/edge/index.js +1 -1
- package/dist/edge/index.mjs +1 -1
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/docs/16-google-vertex.mdx +1407 -0
- package/package.json +10 -5
- package/src/__snapshots__/google-vertex-embedding-model.test.ts.snap +39 -0
- package/src/anthropic/edge/google-vertex-anthropic-provider-edge.test.ts +87 -0
- package/src/anthropic/edge/google-vertex-anthropic-provider-edge.ts +41 -0
- package/src/anthropic/edge/index.ts +8 -0
- package/src/anthropic/google-vertex-anthropic-messages-options.ts +15 -0
- package/src/anthropic/google-vertex-anthropic-provider-node.test.ts +73 -0
- package/src/anthropic/google-vertex-anthropic-provider-node.ts +40 -0
- package/src/anthropic/google-vertex-anthropic-provider.test.ts +208 -0
- package/src/anthropic/google-vertex-anthropic-provider.ts +210 -0
- package/src/anthropic/index.ts +8 -0
- package/src/edge/google-vertex-auth-edge.test.ts +308 -0
- package/src/edge/google-vertex-auth-edge.ts +161 -0
- package/src/edge/google-vertex-provider-edge.test.ts +105 -0
- package/src/edge/google-vertex-provider-edge.ts +50 -0
- package/src/edge/index.ts +5 -0
- package/src/google-vertex-auth-google-auth-library.test.ts +59 -0
- package/src/google-vertex-auth-google-auth-library.ts +27 -0
- package/src/google-vertex-config.ts +8 -0
- package/src/google-vertex-embedding-model.test.ts +315 -0
- package/src/google-vertex-embedding-model.ts +135 -0
- package/src/google-vertex-embedding-options.ts +63 -0
- package/src/google-vertex-error.ts +19 -0
- package/src/google-vertex-image-model.test.ts +926 -0
- package/src/google-vertex-image-model.ts +288 -0
- package/src/google-vertex-image-settings.ts +8 -0
- package/src/google-vertex-options.ts +32 -0
- package/src/google-vertex-provider-node.test.ts +88 -0
- package/src/google-vertex-provider-node.ts +49 -0
- package/src/google-vertex-provider.test.ts +318 -0
- package/src/google-vertex-provider.ts +217 -0
- package/src/google-vertex-tools.ts +11 -0
- package/src/index.ts +7 -0
- package/src/version.ts +6 -0
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
import {
|
|
2
|
+
ImageModelV3,
|
|
3
|
+
ImageModelV3File,
|
|
4
|
+
SharedV3Warning,
|
|
5
|
+
} from '@ai-sdk/provider';
|
|
6
|
+
import {
|
|
7
|
+
Resolvable,
|
|
8
|
+
combineHeaders,
|
|
9
|
+
convertUint8ArrayToBase64,
|
|
10
|
+
createJsonResponseHandler,
|
|
11
|
+
parseProviderOptions,
|
|
12
|
+
postJsonToApi,
|
|
13
|
+
resolve,
|
|
14
|
+
} from '@ai-sdk/provider-utils';
|
|
15
|
+
import { z } from 'zod/v4';
|
|
16
|
+
import { googleVertexFailedResponseHandler } from './google-vertex-error';
|
|
17
|
+
import { GoogleVertexImageModelId } from './google-vertex-image-settings';
|
|
18
|
+
|
|
19
|
+
/**
 * Configuration supplied by the provider when constructing a
 * GoogleVertexImageModel.
 */
interface GoogleVertexImageModelConfig {
  // Provider name surfaced through the model's `provider` getter.
  provider: string;
  // Base URL of the Vertex AI endpoint; `/models/{modelId}:predict` is appended.
  baseURL: string;
  // Extra request headers; may be a plain object or an (async) resolver.
  headers?: Resolvable<Record<string, string | undefined>>;
  // Optional fetch implementation override (e.g. for testing or proxying).
  fetch?: typeof fetch;
  // Internal test hooks — not part of the public API.
  _internal?: {
    // Overridable clock; used for the response timestamp in tests.
    currentDate?: () => Date;
  };
}
|
|
28
|
+
|
|
29
|
+
// https://cloud.google.com/vertex-ai/generative-ai/docs/image/generate-images
/**
 * ImageModelV3 implementation backed by the Vertex AI Imagen `:predict`
 * endpoint. Supports plain text-to-image generation and, when input `files`
 * are supplied, mask-based image editing via reference images.
 */
export class GoogleVertexImageModel implements ImageModelV3 {
  readonly specificationVersion = 'v3';
  // https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#parameter_list
  readonly maxImagesPerCall = 4;

  get provider(): string {
    return this.config.provider;
  }

  constructor(
    readonly modelId: GoogleVertexImageModelId,
    private config: GoogleVertexImageModelConfig,
  ) {}

  /**
   * Generates (or edits) images.
   *
   * Editing mode is selected when `files` is non-empty: the files become
   * RAW reference images and an optional `mask` becomes a MASK reference.
   * The `size` option is not supported and only produces a warning;
   * `aspectRatio` is used instead.
   */
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal,
    files,
    mask,
  }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<
    Awaited<ReturnType<ImageModelV3['doGenerate']>>
  > {
    const warnings: Array<SharedV3Warning> = [];

    if (size != null) {
      warnings.push({
        type: 'unsupported',
        feature: 'size',
        details:
          'This model does not support the `size` option. Use `aspectRatio` instead.',
      });
    }

    // Validates `providerOptions.vertex` against the schema below; returns
    // undefined when no vertex-specific options were passed.
    const vertexImageOptions = await parseProviderOptions({
      provider: 'vertex',
      providerOptions,
      schema: vertexImageProviderOptionsSchema,
    });

    // Extract edit-specific options from provider options
    const { edit, ...otherOptions } = vertexImageOptions ?? {};
    const { mode: editMode, baseSteps, maskMode, maskDilation } = edit ?? {};

    // Build the request body based on whether we're editing or generating
    // NOTE: a `mask` without `files` is silently ignored — editing requires
    // at least one source image.
    const isEditMode = files != null && files.length > 0;

    let body: Record<string, unknown>;

    if (isEditMode) {
      // Build reference images for editing
      const referenceImages: Array<Record<string, unknown>> = [];

      // Add the source image(s); reference ids are 1-based.
      for (let i = 0; i < files.length; i++) {
        const file = files[i];
        referenceImages.push({
          referenceType: 'REFERENCE_TYPE_RAW',
          referenceId: i + 1,
          referenceImage: {
            bytesBase64Encoded: getBase64Data(file),
          },
        });
      }

      // Add mask if provided (gets the next reference id after the sources)
      if (mask != null) {
        referenceImages.push({
          referenceType: 'REFERENCE_TYPE_MASK',
          referenceId: files.length + 1,
          referenceImage: {
            bytesBase64Encoded: getBase64Data(mask),
          },
          maskImageConfig: {
            maskMode: maskMode ?? 'MASK_MODE_USER_PROVIDED',
            ...(maskDilation != null ? { dilation: maskDilation } : {}),
          },
        });
      }

      body = {
        instances: [
          {
            prompt,
            referenceImages,
          },
        ],
        parameters: {
          sampleCount: n,
          ...(aspectRatio != null ? { aspectRatio } : {}),
          ...(seed != null ? { seed } : {}),
          editMode: editMode ?? 'EDIT_MODE_INPAINT_INSERTION',
          ...(baseSteps != null ? { editConfig: { baseSteps } } : {}),
          // Remaining vertex options (negativePrompt, safetySetting, …) are
          // spread last, so they can override the defaults above.
          ...otherOptions,
        },
      };
    } else {
      // Standard image generation
      body = {
        instances: [{ prompt }],
        parameters: {
          sampleCount: n,
          ...(aspectRatio != null ? { aspectRatio } : {}),
          ...(seed != null ? { seed } : {}),
          ...otherOptions,
        },
      };
    }

    // Test hook: allows injecting a fixed clock for the response timestamp.
    const currentDate = this.config._internal?.currentDate?.() ?? new Date();
    const { value: response, responseHeaders } = await postJsonToApi({
      url: `${this.config.baseURL}/models/${this.modelId}:predict`,
      headers: combineHeaders(await resolve(this.config.headers), headers),
      body,
      failedResponseHandler: googleVertexFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        vertexImageResponseSchema,
      ),
      abortSignal,
      fetch: this.config.fetch,
    });

    return {
      // Images are returned as base64 strings, exactly as received.
      images:
        response.predictions?.map(
          ({ bytesBase64Encoded }) => bytesBase64Encoded,
        ) ?? [],
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
      },
      providerMetadata: {
        vertex: {
          // One metadata entry per prediction, carrying the revised prompt
          // when the API returned one.
          images:
            response.predictions?.map(prediction => {
              const {
                // normalize revised prompt property
                prompt: revisedPrompt,
              } = prediction;

              return { ...(revisedPrompt != null && { revisedPrompt }) };
            }) ?? [],
        },
      },
    };
  }
}
|
|
184
|
+
|
|
185
|
+
// minimal version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const vertexImageResponseSchema = z.object({
  predictions: z
    .array(
      z.object({
        // Base64-encoded image bytes.
        bytesBase64Encoded: z.string(),
        mimeType: z.string(),
        // Revised/augmented prompt the service may return per prediction.
        prompt: z.string().nullish(),
      }),
    )
    // `predictions` may be missing entirely (e.g. fully filtered responses);
    // callers fall back to an empty list.
    .nullish(),
});
|
|
198
|
+
|
|
199
|
+
// Vertex-specific provider options. Everything except `edit` is passed
// straight through into the request's `parameters` object (see the Imagen
// API parameter list for the exact semantics of each field).
const vertexImageProviderOptionsSchema = z.object({
  // Description of what to discourage in the generated images.
  negativePrompt: z.string().nullish(),
  // Whether images of people may be generated.
  personGeneration: z
    .enum(['dont_allow', 'allow_adult', 'allow_all'])
    .nullish(),
  // Safety filtering threshold.
  safetySetting: z
    .enum([
      'block_low_and_above',
      'block_medium_and_above',
      'block_only_high',
      'block_none',
    ])
    .nullish(),
  addWatermark: z.boolean().nullish(),
  // Cloud Storage URI to write results to instead of returning bytes inline.
  storageUri: z.string().nullish(),
  sampleImageSize: z.enum(['1K', '2K']).nullish(),
  /**
   * Configuration for image editing operations
   */
  edit: z
    .object({
      /**
       * An integer that represents the number of sampling steps.
       * A higher value offers better image quality, a lower value offers better latency.
       * Try 35 steps to start. If the quality doesn't meet your requirements,
       * increase the value towards an upper limit of 75.
       */
      baseSteps: z.number().nullish(),

      // Edit mode options
      // https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects
      // Defaults to EDIT_MODE_INPAINT_INSERTION when omitted (see doGenerate).
      mode: z
        .enum([
          'EDIT_MODE_INPAINT_INSERTION',
          'EDIT_MODE_INPAINT_REMOVAL',
          'EDIT_MODE_OUTPAINT',
          'EDIT_MODE_CONTROLLED_EDITING',
          'EDIT_MODE_PRODUCT_IMAGE',
          'EDIT_MODE_BGSWAP',
        ])
        .nullish(),

      /**
       * The mask mode to use.
       * - `MASK_MODE_DEFAULT` - Default value for mask mode.
       * - `MASK_MODE_USER_PROVIDED` - User provided mask. No segmentation needed.
       * - `MASK_MODE_DETECTION_BOX` - Mask from detected bounding boxes.
       * - `MASK_MODE_CLOTHING_AREA` - Masks from segmenting the clothing area with open-vocab segmentation.
       * - `MASK_MODE_PARSED_PERSON` - Masks from segmenting the person body and clothing using the person-parsing model.
       */
      maskMode: z
        .enum([
          'MASK_MODE_DEFAULT',
          'MASK_MODE_USER_PROVIDED',
          'MASK_MODE_DETECTION_BOX',
          'MASK_MODE_CLOTHING_AREA',
          'MASK_MODE_PARSED_PERSON',
        ])
        .nullish(),

      /**
       * Optional. A float value between 0 and 1, inclusive, that represents the
       * percentage of the image width to grow the mask by. Using dilation helps
       * compensate for imprecise masks. We recommend a value of 0.01.
       */
      maskDilation: z.number().nullish(),
    })
    .nullish(),
});
// Public type for consumers passing `providerOptions.vertex`.
export type GoogleVertexImageProviderOptions = z.infer<
  typeof vertexImageProviderOptionsSchema
>;
|
|
271
|
+
|
|
272
|
+
/**
|
|
273
|
+
* Helper to convert ImageModelV3File data to base64 string
|
|
274
|
+
*/
|
|
275
|
+
function getBase64Data(file: ImageModelV3File): string {
|
|
276
|
+
if (file.type === 'url') {
|
|
277
|
+
throw new Error(
|
|
278
|
+
'URL-based images are not supported for Google Vertex image editing. Please provide the image data directly.',
|
|
279
|
+
);
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
if (typeof file.data === 'string') {
|
|
283
|
+
return file.data;
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
// Convert Uint8Array to base64
|
|
287
|
+
return convertUint8ArrayToBase64(file.data);
|
|
288
|
+
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions
// Note preview and experimental models may only be detailed in AI Studio:
// https://console.cloud.google.com/vertex-ai/studio/
export type GoogleVertexModelId =
  // Stable models
  | 'gemini-2.5-pro'
  | 'gemini-2.5-flash'
  | 'gemini-2.5-flash-lite'
  | 'gemini-2.0-flash-lite'
  | 'gemini-2.0-flash'
  | 'gemini-2.0-flash-001'
  | 'gemini-1.5-flash'
  | 'gemini-1.5-flash-001'
  | 'gemini-1.5-flash-002'
  | 'gemini-1.5-pro'
  | 'gemini-1.5-pro-001'
  | 'gemini-1.5-pro-002'
  | 'gemini-1.0-pro-001'
  | 'gemini-1.0-pro-vision-001'
  | 'gemini-1.0-pro'
  | 'gemini-1.0-pro-002'
  // Preview models
  | 'gemini-2.0-flash-lite-preview-02-05'
  | 'gemini-2.5-flash-lite-preview-09-2025'
  | 'gemini-2.5-flash-preview-09-2025'
  | 'gemini-3-pro-preview'
  | 'gemini-3-pro-image-preview'
  | 'gemini-3-flash-preview'
  // Experimental models
  | 'gemini-2.0-pro-exp-02-05'
  | 'gemini-2.0-flash-exp'
  // `(string & {})` keeps editor autocompletion for the literals above while
  // still accepting any other model id string.
  | (string & {});
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { resolve } from '@ai-sdk/provider-utils';
|
|
2
|
+
import { createVertex as createVertexOriginal } from './google-vertex-provider';
|
|
3
|
+
import { createVertex as createVertexNode } from './google-vertex-provider-node';
|
|
4
|
+
import { generateAuthToken } from './google-vertex-auth-google-auth-library';
|
|
5
|
+
import { describe, beforeEach, afterEach, expect, it, vi } from 'vitest';
|
|
6
|
+
|
|
7
|
+
// Mock the imported modules
// Token generation is stubbed so tests run without real Google credentials.
vi.mock('./google-vertex-auth-google-auth-library', () => ({
  generateAuthToken: vi.fn().mockResolvedValue('mock-auth-token'),
}));

// The base provider is stubbed to echo back the options it receives, letting
// tests inspect exactly what the node wrapper forwards to it.
vi.mock('./google-vertex-provider', () => ({
  createVertex: vi.fn().mockImplementation(options => ({
    ...options,
  })),
}));
|
|
17
|
+
|
|
18
|
+
describe('google-vertex-provider-node', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Ensure the env-var path of loadOptionalSetting does not leak between tests.
    delete process.env.GOOGLE_VERTEX_API_KEY;
  });

  afterEach(() => {
    delete process.env.GOOGLE_VERTEX_API_KEY;
  });

  it('default headers function should return auth token', async () => {
    createVertexNode({ project: 'test-project' });

    expect(createVertexOriginal).toHaveBeenCalledTimes(1);
    // Options the node wrapper forwarded to the (mocked) base provider.
    const passedOptions = vi.mocked(createVertexOriginal).mock.calls[0][0];

    // Headers must be lazy (a function) so a fresh token is fetched per request.
    expect(typeof passedOptions?.headers).toBe('function');
    expect(await resolve(passedOptions?.headers)).toStrictEqual({
      Authorization: 'Bearer mock-auth-token',
    });
  });

  it('should use custom headers in addition to auth token when provided', async () => {
    createVertexNode({
      project: 'test-project',
      headers: async () => ({
        'Custom-Header': 'custom-value',
      }),
    });

    expect(createVertexOriginal).toHaveBeenCalledTimes(1);
    const passedOptions = vi.mocked(createVertexOriginal).mock.calls[0][0];

    // User headers are merged with (and may override) the auth header.
    expect(await resolve(passedOptions?.headers)).toEqual({
      Authorization: 'Bearer mock-auth-token',
      'Custom-Header': 'custom-value',
    });
  });

  it('passes googleAuthOptions to generateAuthToken', async () => {
    createVertexNode({
      googleAuthOptions: {
        scopes: ['https://www.googleapis.com/auth/cloud-platform'],
        keyFile: 'path/to/key.json',
      },
    });

    expect(createVertexOriginal).toHaveBeenCalledTimes(1);
    const passedOptions = vi.mocked(createVertexOriginal).mock.calls[0][0];

    await resolve(passedOptions?.headers); // call the headers function

    // The auth options must reach generateAuthToken unchanged.
    expect(generateAuthToken).toHaveBeenCalledWith({
      scopes: ['https://www.googleapis.com/auth/cloud-platform'],
      keyFile: 'path/to/key.json',
    });
  });

  it('should pass options through to base provider when apiKey is provided', async () => {
    createVertexNode({
      apiKey: 'test-api-key',
    });

    expect(createVertexOriginal).toHaveBeenCalledTimes(1);
    const passedOptions = vi.mocked(createVertexOriginal).mock.calls[0][0];

    // API-key auth bypasses the token flow entirely: options go through
    // untouched and no bearer token is generated.
    expect(passedOptions?.apiKey).toBe('test-api-key');
    expect(passedOptions?.headers).toBeUndefined();
    expect(generateAuthToken).not.toHaveBeenCalled();
  });
});
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import { loadOptionalSetting, resolve } from '@ai-sdk/provider-utils';
|
|
2
|
+
import { GoogleAuthOptions } from 'google-auth-library';
|
|
3
|
+
import { generateAuthToken } from './google-vertex-auth-google-auth-library';
|
|
4
|
+
import {
|
|
5
|
+
createVertex as createVertexOriginal,
|
|
6
|
+
GoogleVertexProvider,
|
|
7
|
+
GoogleVertexProviderSettings as GoogleVertexProviderSettingsOriginal,
|
|
8
|
+
} from './google-vertex-provider';
|
|
9
|
+
|
|
10
|
+
/**
 * Settings for the Node-specific Google Vertex provider. Extends the base
 * provider settings with google-auth-library authentication options.
 */
export interface GoogleVertexProviderSettings
  extends GoogleVertexProviderSettingsOriginal {
  /**
   * Optional. The Authentication options provided by google-auth-library.
   * Complete list of authentication options is documented in the
   * GoogleAuthOptions interface:
   * https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts.
   *
   * Ignored when an API key is supplied (option or GOOGLE_VERTEX_API_KEY).
   */
  googleAuthOptions?: GoogleAuthOptions;
}
|
|
20
|
+
|
|
21
|
+
export type { GoogleVertexProvider };
|
|
22
|
+
|
|
23
|
+
export function createVertex(
|
|
24
|
+
options: GoogleVertexProviderSettings = {},
|
|
25
|
+
): GoogleVertexProvider {
|
|
26
|
+
const apiKey = loadOptionalSetting({
|
|
27
|
+
settingValue: options.apiKey,
|
|
28
|
+
environmentVariableName: 'GOOGLE_VERTEX_API_KEY',
|
|
29
|
+
});
|
|
30
|
+
|
|
31
|
+
if (apiKey) {
|
|
32
|
+
return createVertexOriginal(options);
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
return createVertexOriginal({
|
|
36
|
+
...options,
|
|
37
|
+
headers: async () => ({
|
|
38
|
+
Authorization: `Bearer ${await generateAuthToken(
|
|
39
|
+
options.googleAuthOptions,
|
|
40
|
+
)}`,
|
|
41
|
+
...(await resolve(options.headers)),
|
|
42
|
+
}),
|
|
43
|
+
});
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
/**
Default Google Vertex AI provider instance.

Created at module load time with default settings, so configuration comes
from environment variables (e.g. GOOGLE_VERTEX_API_KEY) or, presumably,
application default credentials resolved by google-auth-library.
*/
export const vertex = createVertex();
|