@ai-sdk/google-vertex 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +13 -0
- package/README.md +31 -0
- package/dist/index.d.mts +63 -0
- package/dist/index.d.ts +63 -0
- package/dist/index.js +371 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +348 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +68 -0
package/LICENSE
ADDED
@@ -0,0 +1,13 @@
Copyright 2023 Vercel, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
package/README.md
ADDED
@@ -0,0 +1,31 @@
# Vercel AI SDK - Google Vertex AI Provider

The Google provider for the [Vercel AI SDK](https://sdk.vercel.ai/docs) contains language model support for the [Google Vertex AI](https://cloud.google.com/vertex-ai) APIs.
It creates language model objects that can be used with the `generateText` and `streamText` AI functions.
Tool calls are currently not supported.

## Setup

The Google provider is available in the `@ai-sdk/google-vertex` module. You can install it with

```bash
npm i @ai-sdk/google-vertex
```

## Provider Instance

You can import the default provider instance `vertex` from `@ai-sdk/google-vertex`:

```ts
import { vertex } from '@ai-sdk/google-vertex';
```

If you need a customized setup, you can import `createVertex` from `@ai-sdk/google-vertex` and create a provider instance with your settings:

```ts
import { createVertex } from '@ai-sdk/google-vertex';

const vertex = createVertex({
  // custom settings
});
```
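For orientation (not part of the published README): a minimal sketch of how this provider plugs into the AI SDK's `generateText`, assuming the core `ai` package of the same era is installed alongside it and that `GOOGLE_VERTEX_PROJECT` / `GOOGLE_VERTEX_LOCATION` are set. Model id and prompt are placeholders.

```ts
import { vertex } from '@ai-sdk/google-vertex';
import { generateText } from 'ai';

// Illustrative usage sketch: the default provider instance reads project and
// location from the environment; the prompt below is a placeholder.
const { text } = await generateText({
  model: vertex('gemini-1.0-pro'),
  prompt: 'Write a haiku about the Pacific Northwest.',
});

console.log(text);
```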
package/dist/index.d.mts
ADDED
@@ -0,0 +1,63 @@
import { VertexAI } from '@google-cloud/vertexai';
import { LanguageModelV1 } from '@ai-sdk/provider';

type GoogleVertexModelId = 'gemini-1.0-pro' | 'gemini-1.0-pro-vision' | (string & {});
interface GoogleVertexSettings {
    /**
Optional. The maximum number of tokens to consider when sampling.

Models use nucleus sampling or combined Top-k and nucleus sampling.
Top-k sampling considers the set of topK most probable tokens.
Models running with nucleus sampling don't allow topK setting.
     */
    topK?: number;
}

type GoogleVertexAIConfig = {
    vertexAI: VertexAI;
    generateId: () => string;
};
declare class GoogleVertexLanguageModel implements LanguageModelV1 {
    readonly specificationVersion = "v1";
    readonly provider = "google-vertex";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: GoogleVertexModelId;
    readonly settings: GoogleVertexSettings;
    private readonly config;
    constructor(modelId: GoogleVertexModelId, settings: GoogleVertexSettings, config: GoogleVertexAIConfig);
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
}

interface GoogleVertexProvider {
    /**
Creates a model for text generation.
     */
    (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings): GoogleVertexLanguageModel;
}
interface GoogleVertexProviderSettings {
    /**
Your Google Vertex location. Defaults to the environment variable `GOOGLE_VERTEX_LOCATION`.
     */
    location?: string;
    /**
Your Google Vertex project. Defaults to the environment variable `GOOGLE_VERTEX_PROJECT`.
     */
    project?: string;
    generateId?: () => string;
    createVertexAI?: ({ project, location, }: {
        project: string;
        location: string;
    }) => VertexAI;
}
/**
Create a Google Vertex AI provider instance.
 */
declare function createVertex(options?: GoogleVertexProviderSettings): GoogleVertexProvider;
/**
Default Google Vertex AI provider instance.
 */
declare const vertex: GoogleVertexProvider;

export { type GoogleVertexProvider, type GoogleVertexProviderSettings, createVertex, vertex };
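A short sketch of the surface described by these declarations, for readers skimming the diff: `createVertex` takes optional `project` and `location` (otherwise read from the environment), and the resulting provider function accepts a model id plus the `topK`-only model settings. The project and location values below are placeholders.

```ts
import { createVertex } from '@ai-sdk/google-vertex';

// Explicit settings instead of GOOGLE_VERTEX_PROJECT / GOOGLE_VERTEX_LOCATION
// (placeholder values).
const vertex = createVertex({
  project: 'my-gcp-project',
  location: 'us-central1',
});

// GoogleVertexSettings currently exposes only topK.
const model = vertex('gemini-1.0-pro', { topK: 40 });
```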
package/dist/index.d.ts
ADDED
@@ -0,0 +1,63 @@
import { VertexAI } from '@google-cloud/vertexai';
import { LanguageModelV1 } from '@ai-sdk/provider';

type GoogleVertexModelId = 'gemini-1.0-pro' | 'gemini-1.0-pro-vision' | (string & {});
interface GoogleVertexSettings {
    /**
Optional. The maximum number of tokens to consider when sampling.

Models use nucleus sampling or combined Top-k and nucleus sampling.
Top-k sampling considers the set of topK most probable tokens.
Models running with nucleus sampling don't allow topK setting.
     */
    topK?: number;
}

type GoogleVertexAIConfig = {
    vertexAI: VertexAI;
    generateId: () => string;
};
declare class GoogleVertexLanguageModel implements LanguageModelV1 {
    readonly specificationVersion = "v1";
    readonly provider = "google-vertex";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: GoogleVertexModelId;
    readonly settings: GoogleVertexSettings;
    private readonly config;
    constructor(modelId: GoogleVertexModelId, settings: GoogleVertexSettings, config: GoogleVertexAIConfig);
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
}

interface GoogleVertexProvider {
    /**
Creates a model for text generation.
     */
    (modelId: GoogleVertexModelId, settings?: GoogleVertexSettings): GoogleVertexLanguageModel;
}
interface GoogleVertexProviderSettings {
    /**
Your Google Vertex location. Defaults to the environment variable `GOOGLE_VERTEX_LOCATION`.
     */
    location?: string;
    /**
Your Google Vertex project. Defaults to the environment variable `GOOGLE_VERTEX_PROJECT`.
     */
    project?: string;
    generateId?: () => string;
    createVertexAI?: ({ project, location, }: {
        project: string;
        location: string;
    }) => VertexAI;
}
/**
Create a Google Vertex AI provider instance.
 */
declare function createVertex(options?: GoogleVertexProviderSettings): GoogleVertexProvider;
/**
Default Google Vertex AI provider instance.
 */
declare const vertex: GoogleVertexProvider;

export { type GoogleVertexProvider, type GoogleVertexProviderSettings, createVertex, vertex };
package/dist/index.js
ADDED
@@ -0,0 +1,371 @@
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var src_exports = {};
__export(src_exports, {
  createVertex: () => createVertex,
  vertex: () => vertex
});
module.exports = __toCommonJS(src_exports);

// src/google-vertex-provider.ts
var import_provider_utils3 = require("@ai-sdk/provider-utils");
var import_vertexai = require("@google-cloud/vertexai");

// src/google-vertex-language-model.ts
var import_provider2 = require("@ai-sdk/provider");
var import_provider_utils2 = require("@ai-sdk/provider-utils");

// src/convert-to-google-vertex-content-request.ts
var import_provider = require("@ai-sdk/provider");
var import_provider_utils = require("@ai-sdk/provider-utils");
function convertToGoogleVertexContentRequest(prompt) {
  let systemInstruction = void 0;
  const contents = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        if (systemInstruction != null) {
          throw new import_provider.UnsupportedFunctionalityError({
            functionality: "Multiple system messages"
          });
        }
        systemInstruction = content;
        break;
      }
      case "user": {
        contents.push({
          role: "user",
          parts: content.map((part) => {
            var _a;
            switch (part.type) {
              case "text": {
                return { text: part.text };
              }
              case "image": {
                if (part.image instanceof URL) {
                  throw new import_provider.UnsupportedFunctionalityError({
                    functionality: "URL image parts"
                  });
                } else {
                  return {
                    inlineData: {
                      data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image),
                      mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg"
                    }
                  };
                }
              }
              default: {
                const _exhaustiveCheck = part;
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: `prompt part: ${_exhaustiveCheck}`
                });
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        contents.push({
          role: "assistant",
          parts: content.map((part) => {
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text };
              }
              case "tool-call": {
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: "tool-call"
                });
              }
              default: {
                const _exhaustiveCheck = part;
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: `prompt part: ${_exhaustiveCheck}`
                });
              }
            }
          })
        });
        break;
      }
      case "tool": {
        throw new import_provider.UnsupportedFunctionalityError({
          functionality: `role: tool`
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new import_provider.UnsupportedFunctionalityError({
          functionality: `role: ${_exhaustiveCheck}`
        });
      }
    }
  }
  return {
    systemInstruction,
    contents
  };
}

// src/map-google-vertex-finish-reason.ts
function mapGoogleVertexFinishReason({
  finishReason,
  hasToolCalls
}) {
  switch (finishReason) {
    case "STOP":
      return hasToolCalls ? "tool-calls" : "stop";
    case "MAX_TOKENS":
      return "length";
    case "BLOCKLIST":
    case "PROHIBITED_CONTENT":
    case "SPII":
    case "RECITATION":
    case "SAFETY":
      return "content-filter";
    case "FINISH_REASON_UNSPECIFIED":
    case "OTHER":
    default:
      return "other";
  }
}

// src/google-vertex-language-model.ts
var GoogleVertexLanguageModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.provider = "google-vertex";
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  getArgs({
    prompt,
    mode,
    frequencyPenalty,
    presencePenalty,
    seed,
    maxTokens,
    temperature,
    topP
  }) {
    var _a;
    const warnings = [];
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (seed != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "seed"
      });
    }
    const generationConfig = {
      // model specific settings:
      topK: this.settings.topK,
      // standardized settings:
      maxOutputTokens: maxTokens,
      temperature,
      topP
    };
    const type = mode.type;
    switch (type) {
      case "regular": {
        if ((_a = mode.tools) == null ? void 0 : _a.length) {
          throw new import_provider2.UnsupportedFunctionalityError({
            functionality: "tools"
          });
        }
        return {
          model: this.config.vertexAI.getGenerativeModel({
            model: this.modelId,
            generationConfig
          }),
          contentRequest: convertToGoogleVertexContentRequest(prompt),
          warnings
        };
      }
      case "object-json": {
        throw new import_provider2.UnsupportedFunctionalityError({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new import_provider2.UnsupportedFunctionalityError({
          functionality: "object-tool mode"
        });
      }
      case "object-grammar": {
        throw new import_provider2.UnsupportedFunctionalityError({
          functionality: "object-grammar mode"
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  async doGenerate(options) {
    var _a, _b, _c;
    const { model, contentRequest, warnings } = this.getArgs(options);
    const { response } = await model.generateContent(contentRequest);
    const firstCandidate = (_a = response.candidates) == null ? void 0 : _a[0];
    if (firstCandidate == null) {
      throw new import_provider2.NoContentGeneratedError({ message: "No candidates returned" });
    }
    const usageMetadata = response.usageMetadata;
    return {
      text: firstCandidate.content.parts.map((part) => part.text).join(""),
      finishReason: mapGoogleVertexFinishReason({
        finishReason: firstCandidate.finishReason,
        hasToolCalls: false
      }),
      usage: {
        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
      },
      rawCall: {
        rawPrompt: contentRequest,
        rawSettings: {}
      },
      warnings
    };
  }
  async doStream(options) {
    const { model, contentRequest, warnings } = this.getArgs(options);
    const { stream } = await model.generateContentStream(contentRequest);
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    return {
      stream: (0, import_provider_utils2.convertAsyncGeneratorToReadableStream)(stream).pipeThrough(
        new TransformStream(
          {
            transform(chunk, controller) {
              var _a, _b, _c;
              const usageMetadata = chunk.usageMetadata;
              if (usageMetadata != null) {
                usage = {
                  promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
                  completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
                };
              }
              const firstCandidate = (_c = chunk.candidates) == null ? void 0 : _c[0];
              if (firstCandidate == null) {
                controller.enqueue({
                  type: "error",
                  error: new import_provider2.NoContentGeneratedError({
                    message: "No candidates in chunk."
                  })
                });
                return;
              }
              if (firstCandidate.finishReason != null) {
                finishReason = mapGoogleVertexFinishReason({
                  finishReason: firstCandidate.finishReason,
                  hasToolCalls: false
                });
              }
              const textDelta = firstCandidate.content.parts.map((part) => part.text).join("");
              controller.enqueue({
                type: "text-delta",
                textDelta
              });
            },
            flush(controller) {
              controller.enqueue({
                type: "finish",
                finishReason,
                usage
              });
            }
          }
        )
      ),
      rawCall: {
        rawPrompt: contentRequest,
        rawSettings: {}
      },
      warnings
    };
  }
};

// src/google-vertex-provider.ts
function createVertex(options = {}) {
  const createVertexAI = () => {
    var _a, _b;
    const config = {
      project: (0, import_provider_utils3.loadSetting)({
        settingValue: options.project,
        settingName: "project",
        environmentVariableName: "GOOGLE_VERTEX_PROJECT",
        description: "Google Vertex project"
      }),
      location: (0, import_provider_utils3.loadSetting)({
        settingValue: options.location,
        settingName: "location",
        environmentVariableName: "GOOGLE_VERTEX_LOCATION",
        description: "Google Vertex location"
      })
    };
    return (_b = (_a = options.createVertexAI) == null ? void 0 : _a.call(options, config)) != null ? _b : new import_vertexai.VertexAI(config);
  };
  const createChatModel = (modelId, settings = {}) => {
    var _a;
    return new GoogleVertexLanguageModel(modelId, settings, {
      vertexAI: createVertexAI(),
      generateId: (_a = options.generateId) != null ? _a : import_provider_utils3.generateId
    });
  };
  const provider = function(modelId, settings) {
    if (new.target) {
      throw new Error(
        "The Google Vertex AI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(modelId, settings);
  };
  provider.chat = createChatModel;
  return provider;
}
var vertex = createVertex();
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  createVertex,
  vertex
});
//# sourceMappingURL=index.js.map
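One detail worth calling out from this bundle: `createVertex` resolves `project` and `location` via `loadSetting` and then either calls the optional `createVertexAI` override or constructs `new VertexAI(config)` itself. A hedged sketch of that override (described as a testing hook in the bundled source) supplying your own `@google-cloud/vertexai` client; project and location values are placeholders.

```ts
import { createVertex } from '@ai-sdk/google-vertex';
import { VertexAI } from '@google-cloud/vertexai';

// Placeholder project/location; the override receives the resolved values.
const vertex = createVertex({
  project: 'my-gcp-project',
  location: 'us-central1',
  // Called by the provider instead of `new VertexAI(config)`,
  // e.g. to reuse or stub a client in tests.
  createVertexAI: ({ project, location }) => new VertexAI({ project, location }),
});
```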
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/index.ts","../src/google-vertex-provider.ts","../src/google-vertex-language-model.ts","../src/convert-to-google-vertex-content-request.ts","../src/map-google-vertex-finish-reason.ts"],"sourcesContent":["export * from './google-vertex-provider';\n","import { generateId, loadSetting } from '@ai-sdk/provider-utils';\nimport { VertexAI } from '@google-cloud/vertexai';\nimport { GoogleVertexLanguageModel } from './google-vertex-language-model';\nimport {\n GoogleVertexModelId,\n GoogleVertexSettings,\n} from './google-vertex-settings';\n\nexport interface GoogleVertexProvider {\n /**\nCreates a model for text generation.\n */\n (\n modelId: GoogleVertexModelId,\n settings?: GoogleVertexSettings,\n ): GoogleVertexLanguageModel;\n}\n\nexport interface GoogleVertexProviderSettings {\n /**\nYour Google Vertex location. Defaults to the environment variable `GOOGLE_VERTEX_LOCATION`.\n */\n location?: string;\n\n /**\nYour Google Vertex project. Defaults to the environment variable `GOOGLE_VERTEX_PROJECT`.\n */\n project?: string;\n\n // for testing\n generateId?: () => string;\n\n // for testing\n createVertexAI?: ({\n project,\n location,\n }: {\n project: string;\n location: string;\n }) => VertexAI;\n}\n\n/**\nCreate a Google Vertex AI provider instance.\n */\nexport function createVertex(\n options: GoogleVertexProviderSettings = {},\n): GoogleVertexProvider {\n const createVertexAI = () => {\n const config = {\n project: loadSetting({\n settingValue: options.project,\n settingName: 'project',\n environmentVariableName: 'GOOGLE_VERTEX_PROJECT',\n description: 'Google Vertex project',\n }),\n location: loadSetting({\n settingValue: options.location,\n settingName: 'location',\n environmentVariableName: 'GOOGLE_VERTEX_LOCATION',\n description: 'Google Vertex location',\n }),\n };\n\n return options.createVertexAI?.(config) ?? new VertexAI(config);\n };\n\n const createChatModel = (\n modelId: GoogleVertexModelId,\n settings: GoogleVertexSettings = {},\n ) =>\n new GoogleVertexLanguageModel(modelId, settings, {\n vertexAI: createVertexAI(),\n generateId: options.generateId ?? 
generateId,\n });\n\n const provider = function (\n modelId: GoogleVertexModelId,\n settings?: GoogleVertexSettings,\n ) {\n if (new.target) {\n throw new Error(\n 'The Google Vertex AI model function cannot be called with the new keyword.',\n );\n }\n\n return createChatModel(modelId, settings);\n };\n\n provider.chat = createChatModel;\n\n return provider as GoogleVertexProvider;\n}\n\n/**\nDefault Google Vertex AI provider instance.\n */\nexport const vertex = createVertex();\n","import {\n LanguageModelV1,\n LanguageModelV1CallOptions,\n LanguageModelV1CallWarning,\n LanguageModelV1FinishReason,\n LanguageModelV1StreamPart,\n NoContentGeneratedError,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { convertAsyncGeneratorToReadableStream } from '@ai-sdk/provider-utils';\nimport {\n GenerateContentResponse,\n GenerationConfig,\n VertexAI,\n} from '@google-cloud/vertexai';\nimport { convertToGoogleVertexContentRequest } from './convert-to-google-vertex-content-request';\nimport {\n GoogleVertexModelId,\n GoogleVertexSettings,\n} from './google-vertex-settings';\nimport { mapGoogleVertexFinishReason } from './map-google-vertex-finish-reason';\n\ntype GoogleVertexAIConfig = {\n vertexAI: VertexAI;\n generateId: () => string;\n};\n\nexport class GoogleVertexLanguageModel implements LanguageModelV1 {\n readonly specificationVersion = 'v1';\n readonly provider = 'google-vertex';\n readonly defaultObjectGenerationMode = undefined;\n\n readonly modelId: GoogleVertexModelId;\n readonly settings: GoogleVertexSettings;\n\n private readonly config: GoogleVertexAIConfig;\n\n constructor(\n modelId: GoogleVertexModelId,\n settings: GoogleVertexSettings,\n config: GoogleVertexAIConfig,\n ) {\n this.modelId = modelId;\n this.settings = settings;\n this.config = config;\n }\n\n private getArgs({\n prompt,\n mode,\n frequencyPenalty,\n presencePenalty,\n seed,\n maxTokens,\n temperature,\n topP,\n }: LanguageModelV1CallOptions) {\n const warnings: LanguageModelV1CallWarning[] = [];\n\n if (frequencyPenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'frequencyPenalty',\n });\n }\n\n if (presencePenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'presencePenalty',\n });\n }\n\n if (seed != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'seed',\n });\n }\n\n const generationConfig: GenerationConfig = {\n // model specific settings:\n topK: this.settings.topK,\n\n // standardized settings:\n maxOutputTokens: maxTokens,\n temperature,\n topP,\n };\n\n const type = mode.type;\n\n switch (type) {\n case 'regular': {\n if (mode.tools?.length) {\n throw new UnsupportedFunctionalityError({\n functionality: 'tools',\n });\n }\n\n return {\n model: this.config.vertexAI.getGenerativeModel({\n model: this.modelId,\n generationConfig,\n }),\n contentRequest: convertToGoogleVertexContentRequest(prompt),\n warnings,\n };\n }\n\n case 'object-json': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-json mode',\n });\n }\n\n case 'object-tool': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-tool mode',\n });\n }\n\n case 'object-grammar': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-grammar mode',\n });\n }\n\n default: {\n const _exhaustiveCheck: never = type;\n throw new Error(`Unsupported type: ${_exhaustiveCheck}`);\n }\n }\n }\n\n async doGenerate(\n options: Parameters<LanguageModelV1['doGenerate']>[0],\n ): 
Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {\n const { model, contentRequest, warnings } = this.getArgs(options);\n const { response } = await model.generateContent(contentRequest);\n\n const firstCandidate = response.candidates?.[0];\n\n if (firstCandidate == null) {\n throw new NoContentGeneratedError({ message: 'No candidates returned' });\n }\n\n const usageMetadata = response.usageMetadata;\n\n return {\n text: firstCandidate.content.parts.map(part => part.text).join(''),\n finishReason: mapGoogleVertexFinishReason({\n finishReason: firstCandidate.finishReason,\n hasToolCalls: false,\n }),\n usage: {\n promptTokens: usageMetadata?.promptTokenCount ?? NaN,\n completionTokens: usageMetadata?.candidatesTokenCount ?? NaN,\n },\n rawCall: {\n rawPrompt: contentRequest,\n rawSettings: {},\n },\n warnings,\n };\n }\n\n async doStream(\n options: Parameters<LanguageModelV1['doStream']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {\n const { model, contentRequest, warnings } = this.getArgs(options);\n const { stream } = await model.generateContentStream(contentRequest);\n\n let finishReason: LanguageModelV1FinishReason = 'other';\n let usage: { promptTokens: number; completionTokens: number } = {\n promptTokens: Number.NaN,\n completionTokens: Number.NaN,\n };\n\n return {\n stream: convertAsyncGeneratorToReadableStream(stream).pipeThrough(\n new TransformStream<GenerateContentResponse, LanguageModelV1StreamPart>(\n {\n transform(chunk, controller) {\n const usageMetadata = chunk.usageMetadata;\n if (usageMetadata != null) {\n usage = {\n promptTokens: usageMetadata.promptTokenCount ?? NaN,\n completionTokens: usageMetadata.candidatesTokenCount ?? NaN,\n };\n }\n\n const firstCandidate = chunk.candidates?.[0];\n\n if (firstCandidate == null) {\n controller.enqueue({\n type: 'error',\n error: new NoContentGeneratedError({\n message: 'No candidates in chunk.',\n }),\n });\n return;\n }\n\n if (firstCandidate.finishReason != null) {\n finishReason = mapGoogleVertexFinishReason({\n finishReason: firstCandidate.finishReason,\n hasToolCalls: false,\n });\n }\n\n const textDelta = firstCandidate.content.parts\n .map(part => part.text)\n .join('');\n\n controller.enqueue({\n type: 'text-delta',\n textDelta,\n });\n },\n\n flush(controller) {\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage,\n });\n },\n },\n ),\n ),\n rawCall: {\n rawPrompt: contentRequest,\n rawSettings: {},\n },\n warnings,\n };\n }\n}\n","import {\n LanguageModelV1Prompt,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';\nimport { Content, GenerateContentRequest } from '@google-cloud/vertexai';\n\nexport function convertToGoogleVertexContentRequest(\n prompt: LanguageModelV1Prompt,\n): GenerateContentRequest {\n let systemInstruction: string | undefined = undefined;\n const contents: Content[] = [];\n\n for (const { role, content } of prompt) {\n switch (role) {\n case 'system': {\n if (systemInstruction != null) {\n throw new UnsupportedFunctionalityError({\n functionality: 'Multiple system messages',\n });\n }\n\n systemInstruction = content;\n break;\n }\n\n case 'user': {\n contents.push({\n role: 'user',\n parts: content.map(part => {\n switch (part.type) {\n case 'text': {\n return { text: part.text };\n }\n\n case 'image': {\n if (part.image instanceof URL) {\n throw new UnsupportedFunctionalityError({\n functionality: 'URL image parts',\n });\n } else {\n return {\n inlineData: {\n data: 
convertUint8ArrayToBase64(part.image),\n mimeType: part.mimeType ?? 'image/jpeg',\n },\n };\n }\n }\n\n default: {\n const _exhaustiveCheck: never = part;\n throw new UnsupportedFunctionalityError({\n functionality: `prompt part: ${_exhaustiveCheck}`,\n });\n }\n }\n }),\n });\n break;\n }\n\n case 'assistant': {\n contents.push({\n role: 'assistant',\n parts: content.map(part => {\n switch (part.type) {\n case 'text': {\n return { type: 'text', text: part.text };\n }\n\n case 'tool-call': {\n throw new UnsupportedFunctionalityError({\n functionality: 'tool-call',\n });\n }\n\n default: {\n const _exhaustiveCheck: never = part;\n throw new UnsupportedFunctionalityError({\n functionality: `prompt part: ${_exhaustiveCheck}`,\n });\n }\n }\n }),\n });\n\n break;\n }\n\n case 'tool': {\n throw new UnsupportedFunctionalityError({\n functionality: `role: tool`,\n });\n }\n\n default: {\n const _exhaustiveCheck: never = role;\n throw new UnsupportedFunctionalityError({\n functionality: `role: ${_exhaustiveCheck}`,\n });\n }\n }\n }\n\n return {\n systemInstruction,\n contents,\n };\n}\n","import { LanguageModelV1FinishReason } from '@ai-sdk/provider';\nimport { FinishReason } from '@google-cloud/vertexai';\n\nexport function mapGoogleVertexFinishReason({\n finishReason,\n hasToolCalls,\n}: {\n finishReason: FinishReason | undefined;\n hasToolCalls: boolean;\n}): LanguageModelV1FinishReason {\n switch (finishReason) {\n case 'STOP':\n return hasToolCalls ? 'tool-calls' : 'stop';\n case 'MAX_TOKENS':\n return 'length';\n case 'BLOCKLIST':\n case 'PROHIBITED_CONTENT':\n case 'SPII':\n case 'RECITATION':\n case 'SAFETY':\n return 'content-filter';\n case 'FINISH_REASON_UNSPECIFIED':\n case 'OTHER':\n default:\n return 'other';\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,IAAAA,yBAAwC;AACxC,sBAAyB;;;ACDzB,IAAAC,mBAQO;AACP,IAAAC,yBAAsD;;;ACTtD,sBAGO;AACP,4BAA0C;AAGnC,SAAS,oCACd,QACwB;AACxB,MAAI,oBAAwC;AAC5C,QAAM,WAAsB,CAAC;AAE7B,aAAW,EAAE,MAAM,QAAQ,KAAK,QAAQ;AACtC,YAAQ,MAAM;AAAA,MACZ,KAAK,UAAU;AACb,YAAI,qBAAqB,MAAM;AAC7B,gBAAM,IAAI,8CAA8B;AAAA,YACtC,eAAe;AAAA,UACjB,CAAC;AAAA,QACH;AAEA,4BAAoB;AACpB;AAAA,MACF;AAAA,MAEA,KAAK,QAAQ;AACX,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,OAAO,QAAQ,IAAI,UAAQ;AA7BrC;AA8BY,oBAAQ,KAAK,MAAM;AAAA,cACjB,KAAK,QAAQ;AACX,uBAAO,EAAE,MAAM,KAAK,KAAK;AAAA,cAC3B;AAAA,cAEA,KAAK,SAAS;AACZ,oBAAI,KAAK,iBAAiB,KAAK;AAC7B,wBAAM,IAAI,8CAA8B;AAAA,oBACtC,eAAe;AAAA,kBACjB,CAAC;AAAA,gBACH,OAAO;AACL,yBAAO;AAAA,oBACL,YAAY;AAAA,sBACV,UAAM,iDAA0B,KAAK,KAAK;AAAA,sBAC1C,WAAU,UAAK,aAAL,YAAiB;AAAA,oBAC7B;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,cAEA,SAAS;AACP,sBAAM,mBAA0B;AAChC,sBAAM,IAAI,8CAA8B;AAAA,kBACtC,eAAe,gBAAgB,gBAAgB;AAAA,gBACjD,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,CAAC;AAAA,QACH,CAAC;AACD;AAAA,MACF;AAAA,MAEA,KAAK,aAAa;AAChB,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,OAAO,QAAQ,IAAI,UAAQ;AACzB,oBAAQ,KAAK,MAAM;AAAA,cACjB,KAAK,QAAQ;AACX,uBAAO,EAAE,MAAM,QAAQ,MAAM,KAAK,KAAK;AAAA,cACzC;AAAA,cAEA,KAAK,aAAa;AAChB,sBAAM,IAAI,8CAA8B;AAAA,kBACtC,eAAe;AAAA,gBACjB,CAAC;AAAA,cACH;AAAA,cAEA,SAAS;AACP,sBAAM,mBAA0B;AAChC,sBAAM,IAAI,8CAA8B;AAAA,kBACtC,eAAe,gBAAgB,gBAAgB;AAAA,gBACjD,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,CAAC;AAAA,QACH,CAAC;AAED;AAAA,MACF;AAAA,MAEA,KAAK,QAAQ;AACX,cAAM,IAAI,8CAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,SAAS;AACP,cAAM,mBAA0B;AAChC,cAAM,IAAI,8CAA8B;AAAA,UACtC,eAAe,SAAS,gBAAgB;AAAA,QAC1C,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,EACF;AACF;;;AC1GO,SAAS,4BAA4B;AAAA,EAC1C;AAAA,EACA;AACF,GAGgC;AAC9B,UAAQ,cAAc;AAAA,IACpB,KAAK;AACH,aAAO
,eAAe,eAAe;AAAA,IACvC,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AAAA,IACL;AACE,aAAO;AAAA,EACX;AACF;;;AFCO,IAAM,4BAAN,MAA2D;AAAA,EAUhE,YACE,SACA,UACA,QACA;AAbF,SAAS,uBAAuB;AAChC,SAAS,WAAW;AACpB,SAAS,8BAA8B;AAYrC,SAAK,UAAU;AACf,SAAK,WAAW;AAChB,SAAK,SAAS;AAAA,EAChB;AAAA,EAEQ,QAAQ;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAA+B;AAxDjC;AAyDI,UAAM,WAAyC,CAAC;AAEhD,QAAI,oBAAoB,MAAM;AAC5B,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,mBAAmB,MAAM;AAC3B,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,UAAM,mBAAqC;AAAA;AAAA,MAEzC,MAAM,KAAK,SAAS;AAAA;AAAA,MAGpB,iBAAiB;AAAA,MACjB;AAAA,MACA;AAAA,IACF;AAEA,UAAM,OAAO,KAAK;AAElB,YAAQ,MAAM;AAAA,MACZ,KAAK,WAAW;AACd,aAAI,UAAK,UAAL,mBAAY,QAAQ;AACtB,gBAAM,IAAI,+CAA8B;AAAA,YACtC,eAAe;AAAA,UACjB,CAAC;AAAA,QACH;AAEA,eAAO;AAAA,UACL,OAAO,KAAK,OAAO,SAAS,mBAAmB;AAAA,YAC7C,OAAO,KAAK;AAAA,YACZ;AAAA,UACF,CAAC;AAAA,UACD,gBAAgB,oCAAoC,MAAM;AAAA,UAC1D;AAAA,QACF;AAAA,MACF;AAAA,MAEA,KAAK,eAAe;AAClB,cAAM,IAAI,+CAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,KAAK,eAAe;AAClB,cAAM,IAAI,+CAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,KAAK,kBAAkB;AACrB,cAAM,IAAI,+CAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,SAAS;AACP,cAAM,mBAA0B;AAChC,cAAM,IAAI,MAAM,qBAAqB,gBAAgB,EAAE;AAAA,MACzD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,WACJ,SAC6D;AAzIjE;AA0II,UAAM,EAAE,OAAO,gBAAgB,SAAS,IAAI,KAAK,QAAQ,OAAO;AAChE,UAAM,EAAE,SAAS,IAAI,MAAM,MAAM,gBAAgB,cAAc;AAE/D,UAAM,kBAAiB,cAAS,eAAT,mBAAsB;AAE7C,QAAI,kBAAkB,MAAM;AAC1B,YAAM,IAAI,yCAAwB,EAAE,SAAS,yBAAyB,CAAC;AAAA,IACzE;AAEA,UAAM,gBAAgB,SAAS;AAE/B,WAAO;AAAA,MACL,MAAM,eAAe,QAAQ,MAAM,IAAI,UAAQ,KAAK,IAAI,EAAE,KAAK,EAAE;AAAA,MACjE,cAAc,4BAA4B;AAAA,QACxC,cAAc,eAAe;AAAA,QAC7B,cAAc;AAAA,MAChB,CAAC;AAAA,MACD,OAAO;AAAA,QACL,eAAc,oDAAe,qBAAf,YAAmC;AAAA,QACjD,mBAAkB,oDAAe,yBAAf,YAAuC;AAAA,MAC3D;AAAA,MACA,SAAS;AAAA,QACP,WAAW;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,SACJ,SAC2D;AAC3D,UAAM,EAAE,OAAO,gBAAgB,SAAS,IAAI,KAAK,QAAQ,OAAO;AAChE,UAAM,EAAE,OAAO,IAAI,MAAM,MAAM,sBAAsB,cAAc;AAEnE,QAAI,eAA4C;AAChD,QAAI,QAA4D;AAAA,MAC9D,cAAc,OAAO;AAAA,MACrB,kBAAkB,OAAO;AAAA,IAC3B;AAEA,WAAO;AAAA,MACL,YAAQ,8DAAsC,MAAM,EAAE;AAAA,QACpD,IAAI;AAAA,UACF;AAAA,YACE,UAAU,OAAO,YAAY;AAvLzC;AAwLc,oBAAM,gBAAgB,MAAM;AAC5B,kBAAI,iBAAiB,MAAM;AACzB,wBAAQ;AAAA,kBACN,eAAc,mBAAc,qBAAd,YAAkC;AAAA,kBAChD,mBAAkB,mBAAc,yBAAd,YAAsC;AAAA,gBAC1D;AAAA,cACF;AAEA,oBAAM,kBAAiB,WAAM,eAAN,mBAAmB;AAE1C,kBAAI,kBAAkB,MAAM;AAC1B,2BAAW,QAAQ;AAAA,kBACjB,MAAM;AAAA,kBACN,OAAO,IAAI,yCAAwB;AAAA,oBACjC,SAAS;AAAA,kBACX,CAAC;AAAA,gBACH,CAAC;AACD;AAAA,cACF;AAEA,kBAAI,eAAe,gBAAgB,MAAM;AACvC,+BAAe,4BAA4B;AAAA,kBACzC,cAAc,eAAe;AAAA,kBAC7B,cAAc;AAAA,gBAChB,CAAC;AAAA,cACH;AAEA,oBAAM,YAAY,eAAe,QAAQ,MACtC,IAAI,UAAQ,KAAK,IAAI,EACrB,KAAK,EAAE;AAEV,yBAAW,QAAQ;AAAA,gBACjB,MAAM;AAAA,gBACN;AAAA,cACF,CAAC;AAAA,YACH;AAAA,YAEA,MAAM,YAAY;AAChB,yBAAW,QAAQ;AAAA,gBACjB,MAAM;AAAA,gBACN;AAAA,gBACA;AAAA,cACF,CAAC;AAAA,YACH;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,MACA,SAAS;AAAA,QACP,WAAW;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AACF;;;ADjMO,SAAS,aACd,UAAwC,CAAC,GACnB;AACtB,QAAM,iBAAiB,MAAM;AAhD/B;AAiDI,UAAM,SAAS;AAAA,MACb,aAAS,oCAAY;AAAA,QACnB,cAAc,QAAQ;AAAA,QACtB,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC;AAAA,MACD,cAAU,oCAAY;AAAA,QACpB,cAAc,QAAQ;AAAA,QACtB,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAAa;A
AAA,MACf,CAAC;AAAA,IACH;AAEA,YAAO,mBAAQ,mBAAR,iCAAyB,YAAzB,YAAoC,IAAI,yBAAS,MAAM;AAAA,EAChE;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAiC,CAAC,MAClC;AAtEJ;AAuEI,eAAI,0BAA0B,SAAS,UAAU;AAAA,MAC/C,UAAU,eAAe;AAAA,MACzB,aAAY,aAAQ,eAAR,YAAsB;AAAA,IACpC,CAAC;AAAA;AAEH,QAAM,WAAW,SACf,SACA,UACA;AACA,QAAI,YAAY;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,gBAAgB,SAAS,QAAQ;AAAA,EAC1C;AAEA,WAAS,OAAO;AAEhB,SAAO;AACT;AAKO,IAAM,SAAS,aAAa;","names":["import_provider_utils","import_provider","import_provider_utils"]}
package/dist/index.mjs
ADDED
@@ -0,0 +1,348 @@
// src/google-vertex-provider.ts
import { generateId, loadSetting } from "@ai-sdk/provider-utils";
import { VertexAI } from "@google-cloud/vertexai";

// src/google-vertex-language-model.ts
import {
  NoContentGeneratedError,
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
} from "@ai-sdk/provider";
import { convertAsyncGeneratorToReadableStream } from "@ai-sdk/provider-utils";

// src/convert-to-google-vertex-content-request.ts
import {
  UnsupportedFunctionalityError
} from "@ai-sdk/provider";
import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
function convertToGoogleVertexContentRequest(prompt) {
  let systemInstruction = void 0;
  const contents = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        if (systemInstruction != null) {
          throw new UnsupportedFunctionalityError({
            functionality: "Multiple system messages"
          });
        }
        systemInstruction = content;
        break;
      }
      case "user": {
        contents.push({
          role: "user",
          parts: content.map((part) => {
            var _a;
            switch (part.type) {
              case "text": {
                return { text: part.text };
              }
              case "image": {
                if (part.image instanceof URL) {
                  throw new UnsupportedFunctionalityError({
                    functionality: "URL image parts"
                  });
                } else {
                  return {
                    inlineData: {
                      data: convertUint8ArrayToBase64(part.image),
                      mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg"
                    }
                  };
                }
              }
              default: {
                const _exhaustiveCheck = part;
                throw new UnsupportedFunctionalityError({
                  functionality: `prompt part: ${_exhaustiveCheck}`
                });
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        contents.push({
          role: "assistant",
          parts: content.map((part) => {
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text };
              }
              case "tool-call": {
                throw new UnsupportedFunctionalityError({
                  functionality: "tool-call"
                });
              }
              default: {
                const _exhaustiveCheck = part;
                throw new UnsupportedFunctionalityError({
                  functionality: `prompt part: ${_exhaustiveCheck}`
                });
              }
            }
          })
        });
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError({
          functionality: `role: tool`
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new UnsupportedFunctionalityError({
          functionality: `role: ${_exhaustiveCheck}`
        });
      }
    }
  }
  return {
    systemInstruction,
    contents
  };
}

// src/map-google-vertex-finish-reason.ts
function mapGoogleVertexFinishReason({
  finishReason,
  hasToolCalls
}) {
  switch (finishReason) {
    case "STOP":
      return hasToolCalls ? "tool-calls" : "stop";
    case "MAX_TOKENS":
      return "length";
    case "BLOCKLIST":
    case "PROHIBITED_CONTENT":
    case "SPII":
    case "RECITATION":
    case "SAFETY":
      return "content-filter";
    case "FINISH_REASON_UNSPECIFIED":
    case "OTHER":
    default:
      return "other";
  }
}

// src/google-vertex-language-model.ts
var GoogleVertexLanguageModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.provider = "google-vertex";
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  getArgs({
    prompt,
    mode,
    frequencyPenalty,
    presencePenalty,
    seed,
    maxTokens,
    temperature,
    topP
  }) {
    var _a;
    const warnings = [];
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (seed != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "seed"
      });
    }
    const generationConfig = {
      // model specific settings:
      topK: this.settings.topK,
      // standardized settings:
      maxOutputTokens: maxTokens,
      temperature,
      topP
    };
    const type = mode.type;
    switch (type) {
      case "regular": {
        if ((_a = mode.tools) == null ? void 0 : _a.length) {
          throw new UnsupportedFunctionalityError2({
            functionality: "tools"
          });
        }
        return {
          model: this.config.vertexAI.getGenerativeModel({
            model: this.modelId,
            generationConfig
          }),
          contentRequest: convertToGoogleVertexContentRequest(prompt),
          warnings
        };
      }
      case "object-json": {
        throw new UnsupportedFunctionalityError2({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new UnsupportedFunctionalityError2({
          functionality: "object-tool mode"
        });
      }
      case "object-grammar": {
        throw new UnsupportedFunctionalityError2({
          functionality: "object-grammar mode"
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  async doGenerate(options) {
    var _a, _b, _c;
    const { model, contentRequest, warnings } = this.getArgs(options);
    const { response } = await model.generateContent(contentRequest);
    const firstCandidate = (_a = response.candidates) == null ? void 0 : _a[0];
    if (firstCandidate == null) {
      throw new NoContentGeneratedError({ message: "No candidates returned" });
    }
    const usageMetadata = response.usageMetadata;
    return {
      text: firstCandidate.content.parts.map((part) => part.text).join(""),
      finishReason: mapGoogleVertexFinishReason({
        finishReason: firstCandidate.finishReason,
        hasToolCalls: false
      }),
      usage: {
        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
      },
      rawCall: {
        rawPrompt: contentRequest,
        rawSettings: {}
      },
      warnings
    };
  }
  async doStream(options) {
    const { model, contentRequest, warnings } = this.getArgs(options);
    const { stream } = await model.generateContentStream(contentRequest);
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    return {
      stream: convertAsyncGeneratorToReadableStream(stream).pipeThrough(
        new TransformStream(
          {
            transform(chunk, controller) {
              var _a, _b, _c;
              const usageMetadata = chunk.usageMetadata;
              if (usageMetadata != null) {
                usage = {
                  promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
                  completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
                };
              }
              const firstCandidate = (_c = chunk.candidates) == null ? void 0 : _c[0];
              if (firstCandidate == null) {
                controller.enqueue({
                  type: "error",
                  error: new NoContentGeneratedError({
                    message: "No candidates in chunk."
                  })
                });
                return;
              }
              if (firstCandidate.finishReason != null) {
                finishReason = mapGoogleVertexFinishReason({
                  finishReason: firstCandidate.finishReason,
                  hasToolCalls: false
                });
              }
              const textDelta = firstCandidate.content.parts.map((part) => part.text).join("");
              controller.enqueue({
                type: "text-delta",
                textDelta
              });
            },
            flush(controller) {
              controller.enqueue({
                type: "finish",
                finishReason,
                usage
              });
            }
          }
        )
      ),
      rawCall: {
        rawPrompt: contentRequest,
        rawSettings: {}
      },
      warnings
    };
  }
};

// src/google-vertex-provider.ts
function createVertex(options = {}) {
  const createVertexAI = () => {
    var _a, _b;
    const config = {
      project: loadSetting({
        settingValue: options.project,
        settingName: "project",
        environmentVariableName: "GOOGLE_VERTEX_PROJECT",
        description: "Google Vertex project"
      }),
      location: loadSetting({
        settingValue: options.location,
        settingName: "location",
        environmentVariableName: "GOOGLE_VERTEX_LOCATION",
        description: "Google Vertex location"
      })
    };
    return (_b = (_a = options.createVertexAI) == null ? void 0 : _a.call(options, config)) != null ? _b : new VertexAI(config);
  };
  const createChatModel = (modelId, settings = {}) => {
    var _a;
    return new GoogleVertexLanguageModel(modelId, settings, {
      vertexAI: createVertexAI(),
      generateId: (_a = options.generateId) != null ? _a : generateId
    });
  };
  const provider = function(modelId, settings) {
    if (new.target) {
      throw new Error(
        "The Google Vertex AI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(modelId, settings);
  };
  provider.chat = createChatModel;
  return provider;
}
var vertex = createVertex();
export {
  createVertex,
  vertex
};
//# sourceMappingURL=index.mjs.map
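The ESM bundle above is functionally the same as the CJS build; its `doStream` converts the Vertex async generator into a ReadableStream of `text-delta` parts. A minimal consumption sketch via the AI SDK's `streamText`, assuming the core `ai` package of the same era; model id and prompt are placeholders.

```ts
import { vertex } from '@ai-sdk/google-vertex';
import { streamText } from 'ai';

// Illustrative only: consumes the text deltas emitted by doStream above.
const result = await streamText({
  model: vertex('gemini-1.0-pro'),
  prompt: 'Summarize the Apache-2.0 license in one sentence.',
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```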
package/dist/index.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/google-vertex-provider.ts","../src/google-vertex-language-model.ts","../src/convert-to-google-vertex-content-request.ts","../src/map-google-vertex-finish-reason.ts"],"sourcesContent":["import { generateId, loadSetting } from '@ai-sdk/provider-utils';\nimport { VertexAI } from '@google-cloud/vertexai';\nimport { GoogleVertexLanguageModel } from './google-vertex-language-model';\nimport {\n GoogleVertexModelId,\n GoogleVertexSettings,\n} from './google-vertex-settings';\n\nexport interface GoogleVertexProvider {\n /**\nCreates a model for text generation.\n */\n (\n modelId: GoogleVertexModelId,\n settings?: GoogleVertexSettings,\n ): GoogleVertexLanguageModel;\n}\n\nexport interface GoogleVertexProviderSettings {\n /**\nYour Google Vertex location. Defaults to the environment variable `GOOGLE_VERTEX_LOCATION`.\n */\n location?: string;\n\n /**\nYour Google Vertex project. Defaults to the environment variable `GOOGLE_VERTEX_PROJECT`.\n */\n project?: string;\n\n // for testing\n generateId?: () => string;\n\n // for testing\n createVertexAI?: ({\n project,\n location,\n }: {\n project: string;\n location: string;\n }) => VertexAI;\n}\n\n/**\nCreate a Google Vertex AI provider instance.\n */\nexport function createVertex(\n options: GoogleVertexProviderSettings = {},\n): GoogleVertexProvider {\n const createVertexAI = () => {\n const config = {\n project: loadSetting({\n settingValue: options.project,\n settingName: 'project',\n environmentVariableName: 'GOOGLE_VERTEX_PROJECT',\n description: 'Google Vertex project',\n }),\n location: loadSetting({\n settingValue: options.location,\n settingName: 'location',\n environmentVariableName: 'GOOGLE_VERTEX_LOCATION',\n description: 'Google Vertex location',\n }),\n };\n\n return options.createVertexAI?.(config) ?? new VertexAI(config);\n };\n\n const createChatModel = (\n modelId: GoogleVertexModelId,\n settings: GoogleVertexSettings = {},\n ) =>\n new GoogleVertexLanguageModel(modelId, settings, {\n vertexAI: createVertexAI(),\n generateId: options.generateId ?? 
generateId,\n });\n\n const provider = function (\n modelId: GoogleVertexModelId,\n settings?: GoogleVertexSettings,\n ) {\n if (new.target) {\n throw new Error(\n 'The Google Vertex AI model function cannot be called with the new keyword.',\n );\n }\n\n return createChatModel(modelId, settings);\n };\n\n provider.chat = createChatModel;\n\n return provider as GoogleVertexProvider;\n}\n\n/**\nDefault Google Vertex AI provider instance.\n */\nexport const vertex = createVertex();\n","import {\n LanguageModelV1,\n LanguageModelV1CallOptions,\n LanguageModelV1CallWarning,\n LanguageModelV1FinishReason,\n LanguageModelV1StreamPart,\n NoContentGeneratedError,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { convertAsyncGeneratorToReadableStream } from '@ai-sdk/provider-utils';\nimport {\n GenerateContentResponse,\n GenerationConfig,\n VertexAI,\n} from '@google-cloud/vertexai';\nimport { convertToGoogleVertexContentRequest } from './convert-to-google-vertex-content-request';\nimport {\n GoogleVertexModelId,\n GoogleVertexSettings,\n} from './google-vertex-settings';\nimport { mapGoogleVertexFinishReason } from './map-google-vertex-finish-reason';\n\ntype GoogleVertexAIConfig = {\n vertexAI: VertexAI;\n generateId: () => string;\n};\n\nexport class GoogleVertexLanguageModel implements LanguageModelV1 {\n readonly specificationVersion = 'v1';\n readonly provider = 'google-vertex';\n readonly defaultObjectGenerationMode = undefined;\n\n readonly modelId: GoogleVertexModelId;\n readonly settings: GoogleVertexSettings;\n\n private readonly config: GoogleVertexAIConfig;\n\n constructor(\n modelId: GoogleVertexModelId,\n settings: GoogleVertexSettings,\n config: GoogleVertexAIConfig,\n ) {\n this.modelId = modelId;\n this.settings = settings;\n this.config = config;\n }\n\n private getArgs({\n prompt,\n mode,\n frequencyPenalty,\n presencePenalty,\n seed,\n maxTokens,\n temperature,\n topP,\n }: LanguageModelV1CallOptions) {\n const warnings: LanguageModelV1CallWarning[] = [];\n\n if (frequencyPenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'frequencyPenalty',\n });\n }\n\n if (presencePenalty != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'presencePenalty',\n });\n }\n\n if (seed != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'seed',\n });\n }\n\n const generationConfig: GenerationConfig = {\n // model specific settings:\n topK: this.settings.topK,\n\n // standardized settings:\n maxOutputTokens: maxTokens,\n temperature,\n topP,\n };\n\n const type = mode.type;\n\n switch (type) {\n case 'regular': {\n if (mode.tools?.length) {\n throw new UnsupportedFunctionalityError({\n functionality: 'tools',\n });\n }\n\n return {\n model: this.config.vertexAI.getGenerativeModel({\n model: this.modelId,\n generationConfig,\n }),\n contentRequest: convertToGoogleVertexContentRequest(prompt),\n warnings,\n };\n }\n\n case 'object-json': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-json mode',\n });\n }\n\n case 'object-tool': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-tool mode',\n });\n }\n\n case 'object-grammar': {\n throw new UnsupportedFunctionalityError({\n functionality: 'object-grammar mode',\n });\n }\n\n default: {\n const _exhaustiveCheck: never = type;\n throw new Error(`Unsupported type: ${_exhaustiveCheck}`);\n }\n }\n }\n\n async doGenerate(\n options: Parameters<LanguageModelV1['doGenerate']>[0],\n ): 
Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {\n const { model, contentRequest, warnings } = this.getArgs(options);\n const { response } = await model.generateContent(contentRequest);\n\n const firstCandidate = response.candidates?.[0];\n\n if (firstCandidate == null) {\n throw new NoContentGeneratedError({ message: 'No candidates returned' });\n }\n\n const usageMetadata = response.usageMetadata;\n\n return {\n text: firstCandidate.content.parts.map(part => part.text).join(''),\n finishReason: mapGoogleVertexFinishReason({\n finishReason: firstCandidate.finishReason,\n hasToolCalls: false,\n }),\n usage: {\n promptTokens: usageMetadata?.promptTokenCount ?? NaN,\n completionTokens: usageMetadata?.candidatesTokenCount ?? NaN,\n },\n rawCall: {\n rawPrompt: contentRequest,\n rawSettings: {},\n },\n warnings,\n };\n }\n\n async doStream(\n options: Parameters<LanguageModelV1['doStream']>[0],\n ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {\n const { model, contentRequest, warnings } = this.getArgs(options);\n const { stream } = await model.generateContentStream(contentRequest);\n\n let finishReason: LanguageModelV1FinishReason = 'other';\n let usage: { promptTokens: number; completionTokens: number } = {\n promptTokens: Number.NaN,\n completionTokens: Number.NaN,\n };\n\n return {\n stream: convertAsyncGeneratorToReadableStream(stream).pipeThrough(\n new TransformStream<GenerateContentResponse, LanguageModelV1StreamPart>(\n {\n transform(chunk, controller) {\n const usageMetadata = chunk.usageMetadata;\n if (usageMetadata != null) {\n usage = {\n promptTokens: usageMetadata.promptTokenCount ?? NaN,\n completionTokens: usageMetadata.candidatesTokenCount ?? NaN,\n };\n }\n\n const firstCandidate = chunk.candidates?.[0];\n\n if (firstCandidate == null) {\n controller.enqueue({\n type: 'error',\n error: new NoContentGeneratedError({\n message: 'No candidates in chunk.',\n }),\n });\n return;\n }\n\n if (firstCandidate.finishReason != null) {\n finishReason = mapGoogleVertexFinishReason({\n finishReason: firstCandidate.finishReason,\n hasToolCalls: false,\n });\n }\n\n const textDelta = firstCandidate.content.parts\n .map(part => part.text)\n .join('');\n\n controller.enqueue({\n type: 'text-delta',\n textDelta,\n });\n },\n\n flush(controller) {\n controller.enqueue({\n type: 'finish',\n finishReason,\n usage,\n });\n },\n },\n ),\n ),\n rawCall: {\n rawPrompt: contentRequest,\n rawSettings: {},\n },\n warnings,\n };\n }\n}\n","import {\n LanguageModelV1Prompt,\n UnsupportedFunctionalityError,\n} from '@ai-sdk/provider';\nimport { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';\nimport { Content, GenerateContentRequest } from '@google-cloud/vertexai';\n\nexport function convertToGoogleVertexContentRequest(\n prompt: LanguageModelV1Prompt,\n): GenerateContentRequest {\n let systemInstruction: string | undefined = undefined;\n const contents: Content[] = [];\n\n for (const { role, content } of prompt) {\n switch (role) {\n case 'system': {\n if (systemInstruction != null) {\n throw new UnsupportedFunctionalityError({\n functionality: 'Multiple system messages',\n });\n }\n\n systemInstruction = content;\n break;\n }\n\n case 'user': {\n contents.push({\n role: 'user',\n parts: content.map(part => {\n switch (part.type) {\n case 'text': {\n return { text: part.text };\n }\n\n case 'image': {\n if (part.image instanceof URL) {\n throw new UnsupportedFunctionalityError({\n functionality: 'URL image parts',\n });\n } else {\n return {\n inlineData: {\n data: 
convertUint8ArrayToBase64(part.image),\n mimeType: part.mimeType ?? 'image/jpeg',\n },\n };\n }\n }\n\n default: {\n const _exhaustiveCheck: never = part;\n throw new UnsupportedFunctionalityError({\n functionality: `prompt part: ${_exhaustiveCheck}`,\n });\n }\n }\n }),\n });\n break;\n }\n\n case 'assistant': {\n contents.push({\n role: 'assistant',\n parts: content.map(part => {\n switch (part.type) {\n case 'text': {\n return { type: 'text', text: part.text };\n }\n\n case 'tool-call': {\n throw new UnsupportedFunctionalityError({\n functionality: 'tool-call',\n });\n }\n\n default: {\n const _exhaustiveCheck: never = part;\n throw new UnsupportedFunctionalityError({\n functionality: `prompt part: ${_exhaustiveCheck}`,\n });\n }\n }\n }),\n });\n\n break;\n }\n\n case 'tool': {\n throw new UnsupportedFunctionalityError({\n functionality: `role: tool`,\n });\n }\n\n default: {\n const _exhaustiveCheck: never = role;\n throw new UnsupportedFunctionalityError({\n functionality: `role: ${_exhaustiveCheck}`,\n });\n }\n }\n }\n\n return {\n systemInstruction,\n contents,\n };\n}\n","import { LanguageModelV1FinishReason } from '@ai-sdk/provider';\nimport { FinishReason } from '@google-cloud/vertexai';\n\nexport function mapGoogleVertexFinishReason({\n finishReason,\n hasToolCalls,\n}: {\n finishReason: FinishReason | undefined;\n hasToolCalls: boolean;\n}): LanguageModelV1FinishReason {\n switch (finishReason) {\n case 'STOP':\n return hasToolCalls ? 'tool-calls' : 'stop';\n case 'MAX_TOKENS':\n return 'length';\n case 'BLOCKLIST':\n case 'PROHIBITED_CONTENT':\n case 'SPII':\n case 'RECITATION':\n case 'SAFETY':\n return 'content-filter';\n case 'FINISH_REASON_UNSPECIFIED':\n case 'OTHER':\n default:\n return 'other';\n }\n}\n"],"mappings":";AAAA,SAAS,YAAY,mBAAmB;AACxC,SAAS,gBAAgB;;;ACDzB;AAAA,EAME;AAAA,EACA,iCAAAA;AAAA,OACK;AACP,SAAS,6CAA6C;;;ACTtD;AAAA,EAEE;AAAA,OACK;AACP,SAAS,iCAAiC;AAGnC,SAAS,oCACd,QACwB;AACxB,MAAI,oBAAwC;AAC5C,QAAM,WAAsB,CAAC;AAE7B,aAAW,EAAE,MAAM,QAAQ,KAAK,QAAQ;AACtC,YAAQ,MAAM;AAAA,MACZ,KAAK,UAAU;AACb,YAAI,qBAAqB,MAAM;AAC7B,gBAAM,IAAI,8BAA8B;AAAA,YACtC,eAAe;AAAA,UACjB,CAAC;AAAA,QACH;AAEA,4BAAoB;AACpB;AAAA,MACF;AAAA,MAEA,KAAK,QAAQ;AACX,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,OAAO,QAAQ,IAAI,UAAQ;AA7BrC;AA8BY,oBAAQ,KAAK,MAAM;AAAA,cACjB,KAAK,QAAQ;AACX,uBAAO,EAAE,MAAM,KAAK,KAAK;AAAA,cAC3B;AAAA,cAEA,KAAK,SAAS;AACZ,oBAAI,KAAK,iBAAiB,KAAK;AAC7B,wBAAM,IAAI,8BAA8B;AAAA,oBACtC,eAAe;AAAA,kBACjB,CAAC;AAAA,gBACH,OAAO;AACL,yBAAO;AAAA,oBACL,YAAY;AAAA,sBACV,MAAM,0BAA0B,KAAK,KAAK;AAAA,sBAC1C,WAAU,UAAK,aAAL,YAAiB;AAAA,oBAC7B;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,cAEA,SAAS;AACP,sBAAM,mBAA0B;AAChC,sBAAM,IAAI,8BAA8B;AAAA,kBACtC,eAAe,gBAAgB,gBAAgB;AAAA,gBACjD,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,CAAC;AAAA,QACH,CAAC;AACD;AAAA,MACF;AAAA,MAEA,KAAK,aAAa;AAChB,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,OAAO,QAAQ,IAAI,UAAQ;AACzB,oBAAQ,KAAK,MAAM;AAAA,cACjB,KAAK,QAAQ;AACX,uBAAO,EAAE,MAAM,QAAQ,MAAM,KAAK,KAAK;AAAA,cACzC;AAAA,cAEA,KAAK,aAAa;AAChB,sBAAM,IAAI,8BAA8B;AAAA,kBACtC,eAAe;AAAA,gBACjB,CAAC;AAAA,cACH;AAAA,cAEA,SAAS;AACP,sBAAM,mBAA0B;AAChC,sBAAM,IAAI,8BAA8B;AAAA,kBACtC,eAAe,gBAAgB,gBAAgB;AAAA,gBACjD,CAAC;AAAA,cACH;AAAA,YACF;AAAA,UACF,CAAC;AAAA,QACH,CAAC;AAED;AAAA,MACF;AAAA,MAEA,KAAK,QAAQ;AACX,cAAM,IAAI,8BAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,SAAS;AACP,cAAM,mBAA0B;AAChC,cAAM,IAAI,8BAA8B;AAAA,UACtC,eAAe,SAAS,gBAAgB;AAAA,QAC1C,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,EACF;AACF;;;AC1GO,SAAS,4BAA4B;AAAA,EAC1C;AAAA,EACA;AACF,GAGgC;AAC9B,UAAQ,cAAc;AAAA,IACpB,KAAK;AACH,aAA
O,eAAe,eAAe;AAAA,IACvC,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AAAA,IACL;AACE,aAAO;AAAA,EACX;AACF;;;AFCO,IAAM,4BAAN,MAA2D;AAAA,EAUhE,YACE,SACA,UACA,QACA;AAbF,SAAS,uBAAuB;AAChC,SAAS,WAAW;AACpB,SAAS,8BAA8B;AAYrC,SAAK,UAAU;AACf,SAAK,WAAW;AAChB,SAAK,SAAS;AAAA,EAChB;AAAA,EAEQ,QAAQ;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAA+B;AAxDjC;AAyDI,UAAM,WAAyC,CAAC;AAEhD,QAAI,oBAAoB,MAAM;AAC5B,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,mBAAmB,MAAM;AAC3B,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,UAAM,mBAAqC;AAAA;AAAA,MAEzC,MAAM,KAAK,SAAS;AAAA;AAAA,MAGpB,iBAAiB;AAAA,MACjB;AAAA,MACA;AAAA,IACF;AAEA,UAAM,OAAO,KAAK;AAElB,YAAQ,MAAM;AAAA,MACZ,KAAK,WAAW;AACd,aAAI,UAAK,UAAL,mBAAY,QAAQ;AACtB,gBAAM,IAAIC,+BAA8B;AAAA,YACtC,eAAe;AAAA,UACjB,CAAC;AAAA,QACH;AAEA,eAAO;AAAA,UACL,OAAO,KAAK,OAAO,SAAS,mBAAmB;AAAA,YAC7C,OAAO,KAAK;AAAA,YACZ;AAAA,UACF,CAAC;AAAA,UACD,gBAAgB,oCAAoC,MAAM;AAAA,UAC1D;AAAA,QACF;AAAA,MACF;AAAA,MAEA,KAAK,eAAe;AAClB,cAAM,IAAIA,+BAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,KAAK,eAAe;AAClB,cAAM,IAAIA,+BAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,KAAK,kBAAkB;AACrB,cAAM,IAAIA,+BAA8B;AAAA,UACtC,eAAe;AAAA,QACjB,CAAC;AAAA,MACH;AAAA,MAEA,SAAS;AACP,cAAM,mBAA0B;AAChC,cAAM,IAAI,MAAM,qBAAqB,gBAAgB,EAAE;AAAA,MACzD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,WACJ,SAC6D;AAzIjE;AA0II,UAAM,EAAE,OAAO,gBAAgB,SAAS,IAAI,KAAK,QAAQ,OAAO;AAChE,UAAM,EAAE,SAAS,IAAI,MAAM,MAAM,gBAAgB,cAAc;AAE/D,UAAM,kBAAiB,cAAS,eAAT,mBAAsB;AAE7C,QAAI,kBAAkB,MAAM;AAC1B,YAAM,IAAI,wBAAwB,EAAE,SAAS,yBAAyB,CAAC;AAAA,IACzE;AAEA,UAAM,gBAAgB,SAAS;AAE/B,WAAO;AAAA,MACL,MAAM,eAAe,QAAQ,MAAM,IAAI,UAAQ,KAAK,IAAI,EAAE,KAAK,EAAE;AAAA,MACjE,cAAc,4BAA4B;AAAA,QACxC,cAAc,eAAe;AAAA,QAC7B,cAAc;AAAA,MAChB,CAAC;AAAA,MACD,OAAO;AAAA,QACL,eAAc,oDAAe,qBAAf,YAAmC;AAAA,QACjD,mBAAkB,oDAAe,yBAAf,YAAuC;AAAA,MAC3D;AAAA,MACA,SAAS;AAAA,QACP,WAAW;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,SACJ,SAC2D;AAC3D,UAAM,EAAE,OAAO,gBAAgB,SAAS,IAAI,KAAK,QAAQ,OAAO;AAChE,UAAM,EAAE,OAAO,IAAI,MAAM,MAAM,sBAAsB,cAAc;AAEnE,QAAI,eAA4C;AAChD,QAAI,QAA4D;AAAA,MAC9D,cAAc,OAAO;AAAA,MACrB,kBAAkB,OAAO;AAAA,IAC3B;AAEA,WAAO;AAAA,MACL,QAAQ,sCAAsC,MAAM,EAAE;AAAA,QACpD,IAAI;AAAA,UACF;AAAA,YACE,UAAU,OAAO,YAAY;AAvLzC;AAwLc,oBAAM,gBAAgB,MAAM;AAC5B,kBAAI,iBAAiB,MAAM;AACzB,wBAAQ;AAAA,kBACN,eAAc,mBAAc,qBAAd,YAAkC;AAAA,kBAChD,mBAAkB,mBAAc,yBAAd,YAAsC;AAAA,gBAC1D;AAAA,cACF;AAEA,oBAAM,kBAAiB,WAAM,eAAN,mBAAmB;AAE1C,kBAAI,kBAAkB,MAAM;AAC1B,2BAAW,QAAQ;AAAA,kBACjB,MAAM;AAAA,kBACN,OAAO,IAAI,wBAAwB;AAAA,oBACjC,SAAS;AAAA,kBACX,CAAC;AAAA,gBACH,CAAC;AACD;AAAA,cACF;AAEA,kBAAI,eAAe,gBAAgB,MAAM;AACvC,+BAAe,4BAA4B;AAAA,kBACzC,cAAc,eAAe;AAAA,kBAC7B,cAAc;AAAA,gBAChB,CAAC;AAAA,cACH;AAEA,oBAAM,YAAY,eAAe,QAAQ,MACtC,IAAI,UAAQ,KAAK,IAAI,EACrB,KAAK,EAAE;AAEV,yBAAW,QAAQ;AAAA,gBACjB,MAAM;AAAA,gBACN;AAAA,cACF,CAAC;AAAA,YACH;AAAA,YAEA,MAAM,YAAY;AAChB,yBAAW,QAAQ;AAAA,gBACjB,MAAM;AAAA,gBACN;AAAA,gBACA;AAAA,cACF,CAAC;AAAA,YACH;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,MACA,SAAS;AAAA,QACP,WAAW;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,EACF;AACF;;;ADjMO,SAAS,aACd,UAAwC,CAAC,GACnB;AACtB,QAAM,iBAAiB,MAAM;AAhD/B;AAiDI,UAAM,SAAS;AAAA,MACb,SAAS,YAAY;AAAA,QACnB,cAAc,QAAQ;AAAA,QACtB,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC;AAAA,MACD,UAAU,YAAY;AAAA,QACpB,cAAc,QAAQ;AAAA,QACtB,aAAa;AAAA,QACb,yBAAyB;AAAA,QACzB,aAA
a;AAAA,MACf,CAAC;AAAA,IACH;AAEA,YAAO,mBAAQ,mBAAR,iCAAyB,YAAzB,YAAoC,IAAI,SAAS,MAAM;AAAA,EAChE;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAiC,CAAC,MAClC;AAtEJ;AAuEI,eAAI,0BAA0B,SAAS,UAAU;AAAA,MAC/C,UAAU,eAAe;AAAA,MACzB,aAAY,aAAQ,eAAR,YAAsB;AAAA,IACpC,CAAC;AAAA;AAEH,QAAM,WAAW,SACf,SACA,UACA;AACA,QAAI,YAAY;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,WAAO,gBAAgB,SAAS,QAAQ;AAAA,EAC1C;AAEA,WAAS,OAAO;AAEhB,SAAO;AACT;AAKO,IAAM,SAAS,aAAa;","names":["UnsupportedFunctionalityError","UnsupportedFunctionalityError"]}
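The source map above embeds the provider's original TypeScript sources (`index.ts`, `google-vertex-language-model.ts`, `convert-to-google-vertex-content-request.ts`, and `map-google-vertex-finish-reason.ts`) alongside the VLQ mappings. For orientation, a minimal usage sketch of the factory defined in those sources might look like the following; the `project`/`location` option names and the `generateText` helper from the companion `ai` package are assumptions inferred from context, not something this diff confirms.

```ts
// Hypothetical usage sketch only - the option names marked below are assumptions.
import { generateText } from 'ai'; // assumed companion-package helper
import { createVertex } from '@ai-sdk/google-vertex';

// createVertex() returns a provider function; calling it with `new` throws,
// and provider.chat is the same chat-model factory.
const vertex = createVertex({
  project: 'my-gcp-project', // assumption: Google Cloud project option name
  location: 'us-central1',   // assumption: Vertex AI region option name
});

// Only `topK` is exposed as a model setting in this version; frequencyPenalty,
// presencePenalty and seed come back as 'unsupported-setting' warnings, and
// tool definitions make doGenerate/doStream throw UnsupportedFunctionalityError.
const model = vertex('gemini-1.0-pro', { topK: 40 });

const { text } = await generateText({
  model,
  prompt: 'Summarize what a source map is in one sentence.',
});

console.log(text);
```

Streaming would exercise `doStream`, which forwards each Vertex chunk as a `text-delta` stream part and closes with a `finish` part carrying the mapped finish reason (`STOP` → `stop`, `MAX_TOKENS` → `length`, safety-related reasons → `content-filter`) and token usage.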
package/package.json
ADDED
@@ -0,0 +1,68 @@
{
  "name": "@ai-sdk/google-vertex",
  "version": "0.0.1",
  "license": "Apache-2.0",
  "sideEffects": false,
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",
  "types": "./dist/index.d.ts",
  "files": [
    "dist/**/*"
  ],
  "exports": {
    "./package.json": "./package.json",
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.mjs",
      "require": "./dist/index.js"
    }
  },
  "dependencies": {
    "@ai-sdk/provider": "0.0.6",
    "@ai-sdk/provider-utils": "0.0.9",
    "@google-cloud/vertexai": "^1.2.0"
  },
  "devDependencies": {
    "@types/node": "^18",
    "tsup": "^8",
    "typescript": "5.1.3",
    "zod": "3.22.4",
    "@vercel/ai-tsconfig": "0.0.0"
  },
  "peerDependencies": {
    "zod": "^3.0.0"
  },
  "peerDependenciesMeta": {
    "zod": {
      "optional": true
    }
  },
  "engines": {
    "node": ">=18"
  },
  "publishConfig": {
    "access": "public"
  },
  "homepage": "https://sdk.vercel.ai/docs",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/vercel/ai.git"
  },
  "bugs": {
    "url": "https://github.com/vercel/ai/issues"
  },
  "keywords": [
    "ai"
  ],
  "scripts": {
    "build": "tsup",
    "clean": "rm -rf dist",
    "dev": "tsup --watch",
    "lint": "eslint \"./**/*.ts*\"",
    "type-check": "tsc --noEmit",
    "prettier-check": "prettier --check \"./**/*.ts*\"",
    "test": "pnpm test:node && pnpm test:edge",
    "test:edge": "vitest --config vitest.edge.config.js --run",
    "test:node": "vitest --config vitest.node.config.js --run"
  }
}
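The `exports` map resolves the `import` condition to `dist/index.mjs` and the `require` condition to `dist/index.js`, with one shared set of type declarations; `zod` is only an optional peer dependency, and `engines` requires Node 18+. A small sketch of what that dual setup means for a consumer (file names here are illustrative):

```ts
// consumer.mts - ESM entry, resolved through the "import" condition.
import { vertex } from '@ai-sdk/google-vertex';

const model = vertex('gemini-1.0-pro-vision');
console.log(model.provider, model.specificationVersion); // "google-vertex", "v1"

// consumer.cts - CommonJS entry, resolved through the "require" condition
// (shown as a comment so this file stays valid ESM):
//   const { vertex } = require('@ai-sdk/google-vertex');
```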