@ai-sdk/google 1.2.17 → 2.0.0-alpha.1
This diff shows the changes between publicly released versions of the package as they appear in its public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +172 -32
- package/dist/index.d.mts +102 -86
- package/dist/index.d.ts +102 -86
- package/dist/index.js +447 -363
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +452 -363
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +13 -92
- package/{internal/dist → dist/internal}/index.d.ts +13 -92
- package/{internal/dist → dist/internal}/index.js +299 -258
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +302 -259
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
package/dist/index.js
CHANGED
@@ -26,15 +26,136 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/google-provider.ts
+var import_provider4 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");

-// src/google-generative-ai-
-var
+// src/google-generative-ai-embedding-model.ts
+var import_provider = require("@ai-sdk/provider");
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
+var import_zod3 = require("zod");
+
+// src/google-error.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
+var import_zod = require("zod");
+var googleErrorDataSchema = import_zod.z.object({
+error: import_zod.z.object({
+code: import_zod.z.number().nullable(),
+message: import_zod.z.string(),
+status: import_zod.z.string()
+})
+});
+var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+errorSchema: googleErrorDataSchema,
+errorToMessage: (data) => data.error.message
+});
+
+// src/google-generative-ai-embedding-options.ts
 var import_zod2 = require("zod");
+var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
+/**
+* Optional. Optional reduced dimension for the output embedding.
+* If set, excessive values in the output embedding are truncated from the end.
+*/
+outputDimensionality: import_zod2.z.number().optional(),
+/**
+* Optional. Specifies the task type for generating embeddings.
+* Supported task types:
+* - SEMANTIC_SIMILARITY: Optimized for text similarity.
+* - CLASSIFICATION: Optimized for text classification.
+* - CLUSTERING: Optimized for clustering texts based on similarity.
+* - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+* - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+* - QUESTION_ANSWERING: Optimized for answering questions.
+* - FACT_VERIFICATION: Optimized for verifying factual information.
+* - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+*/
+taskType: import_zod2.z.enum([
+"SEMANTIC_SIMILARITY",
+"CLASSIFICATION",
+"CLUSTERING",
+"RETRIEVAL_DOCUMENT",
+"RETRIEVAL_QUERY",
+"QUESTION_ANSWERING",
+"FACT_VERIFICATION",
+"CODE_RETRIEVAL_QUERY"
+]).optional()
+});
+
+// src/google-generative-ai-embedding-model.ts
+var GoogleGenerativeAIEmbeddingModel = class {
+constructor(modelId, config) {
+this.specificationVersion = "v2";
+this.maxEmbeddingsPerCall = 2048;
+this.supportsParallelCalls = true;
+this.modelId = modelId;
+this.config = config;
+}
+get provider() {
+return this.config.provider;
+}
+async doEmbed({
+values,
+headers,
+abortSignal,
+providerOptions
+}) {
+const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
+provider: "google",
+providerOptions,
+schema: googleGenerativeAIEmbeddingProviderOptions
+});
+if (values.length > this.maxEmbeddingsPerCall) {
+throw new import_provider.TooManyEmbeddingValuesForCallError({
+provider: this.provider,
+modelId: this.modelId,
+maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+values
+});
+}
+const mergedHeaders = (0, import_provider_utils2.combineHeaders)(
+await (0, import_provider_utils2.resolve)(this.config.headers),
+headers
+);
+const {
+responseHeaders,
+value: response,
+rawValue
+} = await (0, import_provider_utils2.postJsonToApi)({
+url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
+headers: mergedHeaders,
+body: {
+requests: values.map((value) => ({
+model: `models/${this.modelId}`,
+content: { role: "user", parts: [{ text: value }] },
+outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+taskType: googleOptions == null ? void 0 : googleOptions.taskType
+}))
+},
+failedResponseHandler: googleFailedResponseHandler,
+successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+googleGenerativeAITextEmbeddingResponseSchema
+),
+abortSignal,
+fetch: this.config.fetch
+});
+return {
+embeddings: response.embeddings.map((item) => item.values),
+usage: void 0,
+response: { headers: responseHeaders, body: rawValue }
+};
+}
+};
+var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
+embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+});
+
+// src/google-generative-ai-language-model.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_zod5 = require("zod");

 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-if (isEmptyObjectSchema(jsonSchema)) {
+if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
 return void 0;
 }
 if (typeof jsonSchema === "boolean") {
@@ -130,10 +251,9 @@ function isEmptyObjectSchema(jsonSchema) {
 }

 // src/convert-to-google-generative-ai-messages.ts
-var
-var
+var import_provider2 = require("@ai-sdk/provider");
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 function convertToGoogleGenerativeAIMessages(prompt) {
-var _a, _b;
 const systemInstructionParts = [];
 const contents = [];
 let systemMessagesAllowed = true;
@@ -141,7 +261,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 switch (role) {
 case "system": {
 if (!systemMessagesAllowed) {
-throw new
+throw new import_provider2.UnsupportedFunctionalityError({
 functionality: "system messages are only supported at the beginning of the conversation"
 });
 }
@@ -157,33 +277,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 parts.push({ text: part.text });
 break;
 }
-case "image": {
-parts.push(
-part.image instanceof URL ? {
-fileData: {
-mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-fileUri: part.image.toString()
-}
-} : {
-inlineData: {
-mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
-}
-}
-);
-break;
-}
 case "file": {
+const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
 parts.push(
 part.data instanceof URL ? {
 fileData: {
-mimeType:
+mimeType: mediaType,
 fileUri: part.data.toString()
 }
 } : {
 inlineData: {
-mimeType:
-data: part.data
+mimeType: mediaType,
+data: (0, import_provider_utils3.convertToBase64)(part.data)
 }
 }
 );
@@ -204,20 +309,20 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 return part.text.length === 0 ? void 0 : { text: part.text };
 }
 case "file": {
-if (part.
-throw new
+if (part.mediaType !== "image/png") {
+throw new import_provider2.UnsupportedFunctionalityError({
 functionality: "Only PNG images are supported in assistant messages"
 });
 }
 if (part.data instanceof URL) {
-throw new
+throw new import_provider2.UnsupportedFunctionalityError({
 functionality: "File data URLs in assistant messages are not supported"
 });
 }
 return {
 inlineData: {
-mimeType: part.
-data: part.data
+mimeType: part.mediaType,
+data: (0, import_provider_utils3.convertToBase64)(part.data)
 }
 };
 }
@@ -263,33 +368,110 @@ function getModelPath(modelId) {
 return modelId.includes("/") ? modelId : `models/${modelId}`;
 }

-// src/google-
-var
-var
-
-
-
-
-
-
+// src/google-generative-ai-options.ts
+var import_zod4 = require("zod");
+var dynamicRetrievalConfig = import_zod4.z.object({
+/**
+* The mode of the predictor to be used in dynamic retrieval.
+*/
+mode: import_zod4.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+/**
+* The threshold to be used in dynamic retrieval. If not set, a system default
+* value is used.
+*/
+dynamicThreshold: import_zod4.z.number().optional()
 });
-var
-
-
+var googleGenerativeAIProviderOptions = import_zod4.z.object({
+responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
+thinkingConfig: import_zod4.z.object({
+thinkingBudget: import_zod4.z.number().optional()
+}).optional(),
+/**
+Optional.
+The name of the cached content used as context to serve the prediction.
+Format: cachedContents/{cachedContent}
+*/
+cachedContent: import_zod4.z.string().optional(),
+/**
+* Optional. Enable structured output. Default is true.
+*
+* This is useful when the JSON Schema contains elements that are
+* not supported by the OpenAPI schema version that
+* Google Generative AI uses. You can use this to disable
+* structured outputs if you need to.
+*/
+structuredOutputs: import_zod4.z.boolean().optional(),
+/**
+Optional. A list of unique safety settings for blocking unsafe content.
+*/
+safetySettings: import_zod4.z.array(
+import_zod4.z.object({
+category: import_zod4.z.enum([
+"HARM_CATEGORY_UNSPECIFIED",
+"HARM_CATEGORY_HATE_SPEECH",
+"HARM_CATEGORY_DANGEROUS_CONTENT",
+"HARM_CATEGORY_HARASSMENT",
+"HARM_CATEGORY_SEXUALLY_EXPLICIT",
+"HARM_CATEGORY_CIVIC_INTEGRITY"
+]),
+threshold: import_zod4.z.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+])
+})
+).optional(),
+threshold: import_zod4.z.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+]).optional(),
+/**
+* Optional. Enables timestamp understanding for audio-only files.
+*
+* https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+*/
+audioTimestamp: import_zod4.z.boolean().optional(),
+/**
+Optional. When enabled, the model will use Google search to ground the response.
+
+@see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+*/
+useSearchGrounding: import_zod4.z.boolean().optional(),
+/**
+Optional. Specifies the dynamic retrieval configuration.
+
+@note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+@see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+*/
+dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
 });

 // src/google-prepare-tools.ts
-var
-function prepareTools(
-
-
+var import_provider3 = require("@ai-sdk/provider");
+function prepareTools({
+tools,
+toolChoice,
+useSearchGrounding,
+dynamicRetrievalConfig: dynamicRetrievalConfig2,
+modelId
+}) {
+var _a;
+tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
 const toolWarnings = [];
 const isGemini2 = modelId.includes("gemini-2");
 const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
 if (useSearchGrounding) {
 return {
 tools: isGemini2 ? { googleSearch: {} } : {
-googleSearchRetrieval: !supportsDynamicRetrieval || !
+googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
 },
 toolConfig: void 0,
 toolWarnings
@@ -305,12 +487,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
 } else {
 functionDeclarations.push({
 name: tool.name,
-description: (
+description: (_a = tool.description) != null ? _a : "",
 parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
 });
 }
 }
-const toolChoice = mode.toolChoice;
 if (toolChoice == null) {
 return {
 tools: { functionDeclarations },
@@ -351,8 +532,8 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
 };
 default: {
 const _exhaustiveCheck = type;
-throw new
-functionality: `
+throw new import_provider3.UnsupportedFunctionalityError({
+functionality: `tool choice type: ${_exhaustiveCheck}`
 });
 }
 }
@@ -387,25 +568,21 @@ function mapGoogleGenerativeAIFinishReason({

 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-constructor(modelId,
-this.specificationVersion = "
-this.defaultObjectGenerationMode = "json";
-this.supportsImageUrls = false;
+constructor(modelId, config) {
+this.specificationVersion = "v2";
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
 }
-get supportsStructuredOutputs() {
-var _a;
-return (_a = this.settings.structuredOutputs) != null ? _a : true;
-}
 get provider() {
 return this.config.provider;
 }
+get supportedUrls() {
+var _a, _b, _c;
+return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+}
 async getArgs({
-mode,
 prompt,
-
+maxOutputTokens,
 temperature,
 topP,
 topK,
@@ -414,195 +591,168 @@ var GoogleGenerativeAILanguageModel = class {
 stopSequences,
 responseFormat,
 seed,
-
+tools,
+toolChoice,
+providerOptions
 }) {
 var _a, _b;
-const type = mode.type;
 const warnings = [];
-const googleOptions = (0,
+const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
 provider: "google",
-providerOptions
-schema:
+providerOptions,
+schema: googleGenerativeAIProviderOptions
 });
-const generationConfig = {
-// standardized settings:
-maxOutputTokens: maxTokens,
-temperature,
-topK,
-topP,
-frequencyPenalty,
-presencePenalty,
-stopSequences,
-seed,
-// response format:
-responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-// so this is needed as an escape hatch:
-this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-...this.settings.audioTimestamp && {
-audioTimestamp: this.settings.audioTimestamp
-},
-// provider options:
-responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-};
 const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-contents,
-systemInstruction,
-safetySettings: this.settings.safetySettings,
-cachedContent: this.settings.cachedContent
-},
-warnings
-};
-}
-case "object-tool": {
-return {
-args: {
-generationConfig,
-contents,
-tools: {
-functionDeclarations: [
-{
-name: mode.tool.name,
-description: (_b = mode.tool.description) != null ? _b : "",
-parameters: convertJSONSchemaToOpenAPISchema(
-mode.tool.parameters
-)
-}
-]
-},
-toolConfig: { functionCallingConfig: { mode: "ANY" } },
-safetySettings: this.settings.safetySettings,
-cachedContent: this.settings.cachedContent
+const {
+tools: googleTools,
+toolConfig: googleToolConfig,
+toolWarnings
+} = prepareTools({
+tools,
+toolChoice,
+useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+modelId: this.modelId
+});
+return {
+args: {
+generationConfig: {
+// standardized settings:
+maxOutputTokens,
+temperature,
+topK,
+topP,
+frequencyPenalty,
+presencePenalty,
+stopSequences,
+seed,
+// response format:
+responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+// so this is needed as an escape hatch:
+// TODO convert into provider option
+((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+audioTimestamp: googleOptions.audioTimestamp
 },
-
-
-
-
-
-
-
-
-
-
-
+// provider options:
+responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+},
+contents,
+systemInstruction,
+safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+tools: googleTools,
+toolConfig: googleToolConfig,
+cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+},
+warnings: [...warnings, ...toolWarnings]
+};
 }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i;
 const { args, warnings } = await this.getArgs(options);
 const body = JSON.stringify(args);
-const mergedHeaders = (0,
-await (0,
+const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
+await (0, import_provider_utils4.resolve)(this.config.headers),
 options.headers
 );
 const {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils4.postJsonToApi)({
 url: `${this.config.baseURL}/${getModelPath(
 this.modelId
 )}:generateContent`,
 headers: mergedHeaders,
 body: args,
 failedResponseHandler: googleFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(responseSchema),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
-const { contents: rawPrompt, ...rawSettings } = args;
 const candidate = response.candidates[0];
-const
-const
-
+const content = [];
+const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+for (const part of parts) {
+if ("text" in part && part.text.length > 0) {
+content.push({ type: "text", text: part.text });
+} else if ("functionCall" in part) {
+content.push({
+type: "tool-call",
+toolCallType: "function",
+toolCallId: this.config.generateId(),
+toolName: part.functionCall.name,
+args: JSON.stringify(part.functionCall.args)
+});
+} else if ("inlineData" in part) {
+content.push({
+type: "file",
+data: part.inlineData.data,
+mediaType: part.inlineData.mimeType
+});
+}
+}
+const sources = (_b = extractSources({
+groundingMetadata: candidate.groundingMetadata,
 generateId: this.config.generateId
-});
+})) != null ? _b : [];
+for (const source of sources) {
+content.push(source);
+}
 const usageMetadata = response.usageMetadata;
 return {
-
-files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-data: part.inlineData.data,
-mimeType: part.inlineData.mimeType
-})),
-toolCalls,
+content,
 finishReason: mapGoogleGenerativeAIFinishReason({
 finishReason: candidate.finishReason,
-hasToolCalls:
+hasToolCalls: content.some((part) => part.type === "tool-call")
 }),
 usage: {
-
-
+inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
 },
-rawCall: { rawPrompt, rawSettings },
-rawResponse: { headers: responseHeaders, body: rawResponse },
 warnings,
 providerMetadata: {
 google: {
-groundingMetadata: (
-safetyRatings: (
+groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
 }
 },
-
-
-
-
-
+request: { body },
+response: {
+// TODO timestamp, model id, id
+headers: responseHeaders,
+body: rawResponse
+}
 };
 }
 async doStream(options) {
 const { args, warnings } = await this.getArgs(options);
 const body = JSON.stringify(args);
-const headers = (0,
-await (0,
+const headers = (0, import_provider_utils4.combineHeaders)(
+await (0, import_provider_utils4.resolve)(this.config.headers),
 options.headers
 );
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
 url: `${this.config.baseURL}/${getModelPath(
 this.modelId
 )}:streamGenerateContent?alt=sse`,
 headers,
 body: args,
 failedResponseHandler: googleFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(chunkSchema),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
-const { contents: rawPrompt, ...rawSettings } = args;
 let finishReason = "unknown";
-
-
-
+const usage = {
+inputTokens: void 0,
+outputTokens: void 0,
+totalTokens: void 0
 };
 let providerMetadata = void 0;
 const generateId2 = this.config.generateId;
@@ -610,8 +760,11 @@ var GoogleGenerativeAILanguageModel = class {
 return {
 stream: response.pipeThrough(
 new TransformStream({
+start(controller) {
+controller.enqueue({ type: "stream-start", warnings });
+},
 transform(chunk, controller) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i;
 if (!chunk.success) {
 controller.enqueue({ type: "error", error: chunk.error });
 return;
@@ -619,12 +772,13 @@ var GoogleGenerativeAILanguageModel = class {
 const value = chunk.value;
 const usageMetadata = value.usageMetadata;
 if (usageMetadata != null) {
-usage =
-
-
-
+usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
 }
-const candidate = (
+const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
 if (candidate == null) {
 return;
 }
@@ -632,17 +786,14 @@ var GoogleGenerativeAILanguageModel = class {
 if (content != null) {
 const deltaText = getTextFromParts(content.parts);
 if (deltaText != null) {
-controller.enqueue(
-type: "text-delta",
-textDelta: deltaText
-});
+controller.enqueue(deltaText);
 }
 const inlineDataParts = getInlineDataParts(content.parts);
 if (inlineDataParts != null) {
 for (const part of inlineDataParts) {
 controller.enqueue({
 type: "file",
-
+mediaType: part.inlineData.mimeType,
 data: part.inlineData.data
 });
 }
@@ -676,17 +827,17 @@ var GoogleGenerativeAILanguageModel = class {
 finishReason: candidate.finishReason,
 hasToolCalls
 });
-const sources = (
+const sources = (_g = extractSources({
 groundingMetadata: candidate.groundingMetadata,
 generateId: generateId2
-})) != null ?
+})) != null ? _g : [];
 for (const source of sources) {
-controller.enqueue(
+controller.enqueue(source);
 }
 providerMetadata = {
 google: {
-groundingMetadata: (
-safetyRatings: (
+groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
 }
 };
 }
@@ -701,9 +852,7 @@ var GoogleGenerativeAILanguageModel = class {
 }
 })
 ),
-
-rawResponse: { headers: responseHeaders },
-warnings,
+response: { headers: responseHeaders },
 request: { body }
 };
 }
@@ -716,6 +865,7 @@ function getToolCallsFromParts({
 (part) => "functionCall" in part
 );
 return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+type: "tool-call",
 toolCallType: "function",
 toolCallId: generateId2(),
 toolName: part.functionCall.name,
@@ -724,7 +874,10 @@ function getToolCallsFromParts({
 }
 function getTextFromParts(parts) {
 const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-return textParts == null || textParts.length === 0 ? void 0 :
+return textParts == null || textParts.length === 0 ? void 0 : {
+type: "text",
+text: textParts.map((part) => part.text).join("")
+};
 }
 function getInlineDataParts(parts) {
 return parts == null ? void 0 : parts.filter(
@@ -739,180 +892,103 @@ function extractSources({
 return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
 (chunk) => chunk.web != null
 ).map((chunk) => ({
+type: "source",
 sourceType: "url",
 id: generateId2(),
 url: chunk.web.uri,
 title: chunk.web.title
 }));
 }
-var contentSchema =
-role:
-parts:
-
-
-text:
+var contentSchema = import_zod5.z.object({
+role: import_zod5.z.string(),
+parts: import_zod5.z.array(
+import_zod5.z.union([
+import_zod5.z.object({
+text: import_zod5.z.string()
 }),
-
-functionCall:
-name:
-args:
+import_zod5.z.object({
+functionCall: import_zod5.z.object({
+name: import_zod5.z.string(),
+args: import_zod5.z.unknown()
 })
 }),
-
-inlineData:
-mimeType:
-data:
+import_zod5.z.object({
+inlineData: import_zod5.z.object({
+mimeType: import_zod5.z.string(),
+data: import_zod5.z.string()
 })
 })
 ])
 ).nullish()
 });
-var groundingChunkSchema =
-web:
-retrievedContext:
+var groundingChunkSchema = import_zod5.z.object({
+web: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish(),
+retrievedContext: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish()
 });
-var groundingMetadataSchema =
-webSearchQueries:
-retrievalQueries:
-searchEntryPoint:
-groundingChunks:
-groundingSupports:
-
-segment:
-startIndex:
-endIndex:
-text:
+var groundingMetadataSchema = import_zod5.z.object({
+webSearchQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+retrievalQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+searchEntryPoint: import_zod5.z.object({ renderedContent: import_zod5.z.string() }).nullish(),
+groundingChunks: import_zod5.z.array(groundingChunkSchema).nullish(),
+groundingSupports: import_zod5.z.array(
+import_zod5.z.object({
+segment: import_zod5.z.object({
+startIndex: import_zod5.z.number().nullish(),
+endIndex: import_zod5.z.number().nullish(),
+text: import_zod5.z.string().nullish()
 }),
-segment_text:
-groundingChunkIndices:
-supportChunkIndices:
-confidenceScores:
-confidenceScore:
+segment_text: import_zod5.z.string().nullish(),
+groundingChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+supportChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+confidenceScores: import_zod5.z.array(import_zod5.z.number()).nullish(),
+confidenceScore: import_zod5.z.array(import_zod5.z.number()).nullish()
 })
 ).nullish(),
-retrievalMetadata:
-
-webDynamicRetrievalScore:
+retrievalMetadata: import_zod5.z.union([
+import_zod5.z.object({
+webDynamicRetrievalScore: import_zod5.z.number()
 }),
-
+import_zod5.z.object({})
 ]).nullish()
 });
-var safetyRatingSchema =
-category:
-probability:
-probabilityScore:
-severity:
-severityScore:
-blocked:
+var safetyRatingSchema = import_zod5.z.object({
+category: import_zod5.z.string().nullish(),
+probability: import_zod5.z.string().nullish(),
+probabilityScore: import_zod5.z.number().nullish(),
+severity: import_zod5.z.string().nullish(),
+severityScore: import_zod5.z.number().nullish(),
+blocked: import_zod5.z.boolean().nullish()
 });
-var
-
-
-
-
-
+var usageSchema = import_zod5.z.object({
+cachedContentTokenCount: import_zod5.z.number().nullish(),
+thoughtsTokenCount: import_zod5.z.number().nullish(),
+promptTokenCount: import_zod5.z.number().nullish(),
+candidatesTokenCount: import_zod5.z.number().nullish(),
+totalTokenCount: import_zod5.z.number().nullish()
+});
+var responseSchema = import_zod5.z.object({
+candidates: import_zod5.z.array(
+import_zod5.z.object({
+content: contentSchema.nullish().or(import_zod5.z.object({}).strict()),
+finishReason: import_zod5.z.string().nullish(),
+safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
 groundingMetadata: groundingMetadataSchema.nullish()
 })
 ),
-usageMetadata:
-promptTokenCount: import_zod2.z.number().nullish(),
-candidatesTokenCount: import_zod2.z.number().nullish(),
-totalTokenCount: import_zod2.z.number().nullish()
-}).nullish()
+usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-candidates:
-
+var chunkSchema = import_zod5.z.object({
+candidates: import_zod5.z.array(
+import_zod5.z.object({
 content: contentSchema.nullish(),
-finishReason:
-safetyRatings:
+finishReason: import_zod5.z.string().nullish(),
+safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
 groundingMetadata: groundingMetadataSchema.nullish()
 })
 ).nullish(),
-usageMetadata:
-promptTokenCount: import_zod2.z.number().nullish(),
-candidatesTokenCount: import_zod2.z.number().nullish(),
-totalTokenCount: import_zod2.z.number().nullish()
-}).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
-responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
-thinkingConfig: import_zod2.z.object({
-thinkingBudget: import_zod2.z.number().nullish()
-}).nullish()
-});
-
-// src/google-generative-ai-embedding-model.ts
-var import_provider3 = require("@ai-sdk/provider");
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var import_zod3 = require("zod");
-var GoogleGenerativeAIEmbeddingModel = class {
-constructor(modelId, settings, config) {
-this.specificationVersion = "v1";
-this.modelId = modelId;
-this.settings = settings;
-this.config = config;
-}
-get provider() {
-return this.config.provider;
-}
-get maxEmbeddingsPerCall() {
-return 2048;
-}
-get supportsParallelCalls() {
-return true;
-}
-async doEmbed({
-values,
-headers,
-abortSignal
-}) {
-if (values.length > this.maxEmbeddingsPerCall) {
-throw new import_provider3.TooManyEmbeddingValuesForCallError({
-provider: this.provider,
-modelId: this.modelId,
-maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
-values
-});
-}
-const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
-await (0, import_provider_utils4.resolve)(this.config.headers),
-headers
-);
-const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
-url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
-headers: mergedHeaders,
-body: {
-requests: values.map((value) => ({
-model: `models/${this.modelId}`,
-content: { role: "user", parts: [{ text: value }] },
-outputDimensionality: this.settings.outputDimensionality,
-taskType: this.settings.taskType
-}))
-},
-failedResponseHandler: googleFailedResponseHandler,
-successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
-googleGenerativeAITextEmbeddingResponseSchema
-),
-abortSignal,
-fetch: this.config.fetch
-});
-return {
-embeddings: response.embeddings.map((item) => item.values),
-usage: void 0,
-rawResponse: { headers: responseHeaders }
-};
-}
-};
-var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
-embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+usageMetadata: usageSchema.nullish()
 });

-// src/google-supported-file-url.ts
-function isSupportedFileUrl(url) {
-return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
-}
-
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
 var _a;
@@ -925,30 +1001,35 @@ function createGoogleGenerativeAI(options = {}) {
 }),
 ...options.headers
 });
-const createChatModel = (modelId
+const createChatModel = (modelId) => {
 var _a2;
-return new GoogleGenerativeAILanguageModel(modelId,
+return new GoogleGenerativeAILanguageModel(modelId, {
 provider: "google.generative-ai",
 baseURL,
 headers: getHeaders,
 generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils5.generateId,
-
+supportedUrls: () => ({
+"*": [
+// HTTP URLs:
+/^https?:\/\/.*$/
+]
+}),
 fetch: options.fetch
 });
 };
-const createEmbeddingModel = (modelId
+const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
 provider: "google.generative-ai",
 baseURL,
 headers: getHeaders,
 fetch: options.fetch
 });
-const provider = function(modelId
+const provider = function(modelId) {
 if (new.target) {
 throw new Error(
 "The Google Generative AI model function cannot be called with the new keyword."
 );
 }
-return createChatModel(modelId
+return createChatModel(modelId);
 };
 provider.languageModel = createChatModel;
 provider.chat = createChatModel;
@@ -956,6 +1037,9 @@ function createGoogleGenerativeAI(options = {}) {
 provider.embedding = createEmbeddingModel;
 provider.textEmbedding = createEmbeddingModel;
 provider.textEmbeddingModel = createEmbeddingModel;
+provider.imageModel = (modelId) => {
+throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
+};
 return provider;
 }
 var google = createGoogleGenerativeAI();
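Migration note (not part of the published diff): the changes above move Google-specific options from model construction to per-call provider options. In 1.2.17 the embedding model read outputDimensionality and taskType from constructor settings; in 2.0.0-alpha.1 it parses them from providerOptions against googleGenerativeAIEmbeddingProviderOptions, and both models now report specificationVersion "v2" with usage exposed as inputTokens/outputTokens/totalTokens. A minimal caller-side TypeScript sketch follows; the embed() helper from the "ai" package and the text-embedding-004 model id are assumptions for illustration and do not appear in this diff.

// Sketch only: `embed` from "ai" and the model id are assumed, not taken from this diff.
import { google } from '@ai-sdk/google';
import { embed } from 'ai';

const { embedding } = await embed({
  model: google.textEmbedding('text-embedding-004'),
  value: 'sunny day at the beach',
  // 2.0.0-alpha.1: options travel per call under providerOptions.google,
  // replacing the { outputDimensionality, taskType } settings object that
  // 1.2.17 accepted when constructing the embedding model.
  providerOptions: {
    google: {
      outputDimensionality: 512,
      taskType: 'SEMANTIC_SIMILARITY',
    },
  },
});

The language-model options shown in the diff (safetySettings, cachedContent, structuredOutputs, audioTimestamp, useSearchGrounding, dynamicRetrievalConfig) move the same way, under providerOptions.google on generate and stream calls.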