@ai-sdk/google 2.0.0-canary.2 → 2.0.0-canary.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +184 -0
- package/README.md +2 -2
- package/dist/index.d.mts +107 -65
- package/dist/index.d.ts +107 -65
- package/dist/index.js +363 -280
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +368 -282
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +14 -80
- package/{internal/dist → dist/internal}/index.d.ts +14 -80
- package/{internal/dist → dist/internal}/index.js +298 -253
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +301 -254
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +20 -19
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
package/dist/index.js
CHANGED
@@ -32,7 +32,7 @@ var import_provider_utils5 = require("@ai-sdk/provider-utils");
 // src/google-generative-ai-embedding-model.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var
+var import_zod3 = require("zod");
 
 // src/google-error.ts
 var import_provider_utils = require("@ai-sdk/provider-utils");
@@ -49,28 +49,61 @@ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-embedding-options.ts
+var import_zod2 = require("zod");
+var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
+  /**
+   * Optional. Optional reduced dimension for the output embedding.
+   * If set, excessive values in the output embedding are truncated from the end.
+   */
+  outputDimensionality: import_zod2.z.number().optional(),
+  /**
+   * Optional. Specifies the task type for generating embeddings.
+   * Supported task types:
+   * - SEMANTIC_SIMILARITY: Optimized for text similarity.
+   * - CLASSIFICATION: Optimized for text classification.
+   * - CLUSTERING: Optimized for clustering texts based on similarity.
+   * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+   * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+   * - QUESTION_ANSWERING: Optimized for answering questions.
+   * - FACT_VERIFICATION: Optimized for verifying factual information.
+   * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+   */
+  taskType: import_zod2.z.enum([
+    "SEMANTIC_SIMILARITY",
+    "CLASSIFICATION",
+    "CLUSTERING",
+    "RETRIEVAL_DOCUMENT",
+    "RETRIEVAL_QUERY",
+    "QUESTION_ANSWERING",
+    "FACT_VERIFICATION",
+    "CODE_RETRIEVAL_QUERY"
+  ]).optional()
+});
+
 // src/google-generative-ai-embedding-model.ts
 var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
+      provider: "google",
+      providerOptions,
+      schema: googleGenerativeAIEmbeddingProviderOptions
+    });
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
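The embedding model no longer takes a settings object; `outputDimensionality` and the new `taskType` are read per call from `providerOptions.google`, validated against the schema added above. A minimal usage sketch, assuming the AI SDK v5 canary `embedMany` API (which forwards `providerOptions` to `doEmbed`); the model id is illustrative:

```ts
import { google } from '@ai-sdk/google';
import { embedMany } from 'ai';

// Sketch: per-call embedding options go under providerOptions.google,
// matching googleGenerativeAIEmbeddingProviderOptions above.
const { embeddings } = await embedMany({
  model: google.textEmbeddingModel('text-embedding-004'),
  values: ['sunny day at the beach', 'rainy day in the city'],
  providerOptions: {
    google: {
      taskType: 'SEMANTIC_SIMILARITY',
      outputDimensionality: 512,
    },
  },
});
```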
@@ -83,14 +116,19 @@ var GoogleGenerativeAIEmbeddingModel = class {
       await (0, import_provider_utils2.resolve)(this.config.headers),
       headers
     );
-    const {
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await (0, import_provider_utils2.postJsonToApi)({
       url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
       headers: mergedHeaders,
       body: {
         requests: values.map((value) => ({
           model: `models/${this.modelId}`,
           content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality:
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
         }))
       },
       failedResponseHandler: googleFailedResponseHandler,
@@ -103,21 +141,21 @@ var GoogleGenerativeAIEmbeddingModel = class {
     return {
       embeddings: response.embeddings.map((item) => item.values),
       usage: void 0,
-
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var googleGenerativeAITextEmbeddingResponseSchema =
-  embeddings:
+var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
+  embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
 });
 
 // src/google-generative-ai-language-model.ts
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var
+var import_zod5 = require("zod");
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
@@ -216,7 +254,6 @@ function isEmptyObjectSchema(jsonSchema) {
 var import_provider2 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -240,33 +277,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             parts.push({ text: part.text });
             break;
           }
-          case "image": {
-            parts.push(
-              part.image instanceof URL ? {
-                fileData: {
-                  mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                  fileUri: part.image.toString()
-                }
-              } : {
-                inlineData: {
-                  mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                  data: (0, import_provider_utils3.convertUint8ArrayToBase64)(part.image)
-                }
-              }
-            );
-            break;
-          }
           case "file": {
+            const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
             parts.push(
               part.data instanceof URL ? {
                 fileData: {
-                  mimeType:
+                  mimeType: mediaType,
                   fileUri: part.data.toString()
                 }
               } : {
                 inlineData: {
-                  mimeType:
-                  data: part.data
+                  mimeType: mediaType,
+                  data: (0, import_provider_utils3.convertToBase64)(part.data)
                 }
               }
             );
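The separate `image` case is gone: images now arrive as `file` parts that carry a `mediaType`, with the wildcard `image/*` narrowed to `image/jpeg` and binary data encoded via `convertToBase64`. A sketch of the two part shapes the converter handles (shapes inferred from this diff, not from the published prompt types):

```ts
// URL data is forwarded as fileData: { mimeType, fileUri }
const urlPart = {
  type: 'file',
  mediaType: 'image/*', // narrowed to image/jpeg by the converter
  data: new URL('https://example.com/photo.jpg'),
};

// Binary data is forwarded as inlineData: { mimeType, data: <base64> }
const bytesPart = {
  type: 'file',
  mediaType: 'image/png',
  data: new Uint8Array([137, 80, 78, 71]),
};
```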
@@ -287,7 +309,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             return part.text.length === 0 ? void 0 : { text: part.text };
           }
           case "file": {
-            if (part.
+            if (part.mediaType !== "image/png") {
               throw new import_provider2.UnsupportedFunctionalityError({
                 functionality: "Only PNG images are supported in assistant messages"
               });
@@ -299,8 +321,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             }
             return {
               inlineData: {
-                mimeType: part.
-                data: part.data
+                mimeType: part.mediaType,
+                data: (0, import_provider_utils3.convertToBase64)(part.data)
               }
             };
           }
@@ -346,18 +368,110 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
+// src/google-generative-ai-options.ts
+var import_zod4 = require("zod");
+var dynamicRetrievalConfig = import_zod4.z.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: import_zod4.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: import_zod4.z.number().optional()
+});
+var googleGenerativeAIProviderOptions = import_zod4.z.object({
+  responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: import_zod4.z.object({
+    thinkingBudget: import_zod4.z.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: import_zod4.z.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: import_zod4.z.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: import_zod4.z.array(
+    import_zod4.z.object({
+      category: import_zod4.z.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: import_zod4.z.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: import_zod4.z.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: import_zod4.z.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: import_zod4.z.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 var import_provider3 = require("@ai-sdk/provider");
-function prepareTools(
-
-
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
@@ -373,12 +487,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -420,7 +533,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider3.UnsupportedFunctionalityError({
-        functionality: `
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -455,25 +568,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -482,111 +591,66 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-
+    tools,
+    toolChoice,
+    providerOptions
   }) {
     var _a, _b;
-    const type = mode.type;
     const warnings = [];
-    const googleOptions = (0, import_provider_utils4.parseProviderOptions)({
+    const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
       provider: "google",
-      providerOptions
-      schema:
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        contents,
-        systemInstruction,
-        safetySettings: this.settings.safetySettings,
-        cachedContent: this.settings.cachedContent
-      },
-      warnings
-    };
-  }
-  case "object-tool": {
-    return {
-      args: {
-        generationConfig,
-        contents,
-        tools: {
-          functionDeclarations: [
-            {
-              name: mode.tool.name,
-              description: (_b = mode.tool.description) != null ? _b : "",
-              parameters: convertJSONSchemaToOpenAPISchema(
-                mode.tool.parameters
-              )
-            }
-          ]
-        },
-        toolConfig: { functionCallingConfig: { mode: "ANY" } },
-        safetySettings: this.settings.safetySettings,
-        cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-
-
-
-
-
-
-
-
-
-
-
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
@@ -608,43 +672,62 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const
-    const
-
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
       generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     const usageMetadata = response.usageMetadata;
     return {
-
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls:
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-
-
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
       warnings,
       providerMetadata: {
         google: {
-          groundingMetadata: (
-          safetyRatings: (
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
         }
       },
-
-
-
-
-
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
     };
   }
   async doStream(options) {
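`doGenerate` now returns a single ordered `content` array instead of separate text, tool-call, and file fields, and the usage object gains reasoning and cached-input token counts. For orientation, an abridged sketch of the result shape assembled above (derived from this diff, not from the published LanguageModelV2 types):

```ts
// Abridged sketch of the object returned by doGenerate above.
type GoogleGenerateResult = {
  content: Array<
    | { type: 'text'; text: string }
    | { type: 'tool-call'; toolCallType: 'function'; toolCallId: string; toolName: string; args: string }
    | { type: 'file'; data: string; mediaType: string }
    | { type: 'source'; sourceType: 'url'; id: string; url: string; title?: string }
  >;
  finishReason: string;
  usage: {
    inputTokens?: number;       // promptTokenCount
    outputTokens?: number;      // candidatesTokenCount
    totalTokens?: number;       // totalTokenCount
    reasoningTokens?: number;   // thoughtsTokenCount
    cachedInputTokens?: number; // cachedContentTokenCount
  };
  warnings: unknown[];
  providerMetadata: { google: { groundingMetadata: unknown; safetyRatings: unknown } };
  request: { body: string };
  response: { headers: unknown; body: unknown };
};
```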
@@ -665,11 +748,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
     const generateId2 = this.config.generateId;
@@ -677,8 +760,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -686,12 +772,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage =
-
-
-
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
@@ -699,17 +786,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue(
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -743,17 +827,17 @@ var GoogleGenerativeAILanguageModel = class {
               finishReason: candidate.finishReason,
               hasToolCalls
             });
-            const sources = (
+            const sources = (_g = extractSources({
               groundingMetadata: candidate.groundingMetadata,
               generateId: generateId2
-            })) != null ?
+            })) != null ? _g : [];
             for (const source of sources) {
-              controller.enqueue(
+              controller.enqueue(source);
             }
             providerMetadata = {
               google: {
-                groundingMetadata: (
-                safetyRatings: (
+                groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
               }
             };
           }
@@ -768,9 +852,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
@@ -783,6 +865,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId2(),
     toolName: part.functionCall.name,
@@ -791,7 +874,10 @@ function getToolCallsFromParts({
 }
 function getTextFromParts(parts) {
   const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-  return textParts == null || textParts.length === 0 ? void 0 :
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
@@ -806,110 +892,102 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId2(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-      text:
+var contentSchema = import_zod5.z.object({
+  role: import_zod5.z.string(),
+  parts: import_zod5.z.array(
+    import_zod5.z.union([
+      import_zod5.z.object({
+        text: import_zod5.z.string()
       }),
-
-      functionCall:
-        name:
-        args:
+      import_zod5.z.object({
+        functionCall: import_zod5.z.object({
+          name: import_zod5.z.string(),
+          args: import_zod5.z.unknown()
         })
       }),
-
-      inlineData:
-        mimeType:
-        data:
+      import_zod5.z.object({
+        inlineData: import_zod5.z.object({
+          mimeType: import_zod5.z.string(),
+          data: import_zod5.z.string()
        })
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = import_zod5.z.object({
+  web: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish(),
+  retrievedContext: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-    segment:
-      startIndex:
-      endIndex:
-      text:
+var groundingMetadataSchema = import_zod5.z.object({
+  webSearchQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+  retrievalQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+  searchEntryPoint: import_zod5.z.object({ renderedContent: import_zod5.z.string() }).nullish(),
+  groundingChunks: import_zod5.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_zod5.z.array(
+    import_zod5.z.object({
+      segment: import_zod5.z.object({
+        startIndex: import_zod5.z.number().nullish(),
+        endIndex: import_zod5.z.number().nullish(),
+        text: import_zod5.z.string().nullish()
       }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: import_zod5.z.string().nullish(),
+      groundingChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      supportChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      confidenceScores: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      confidenceScore: import_zod5.z.array(import_zod5.z.number()).nullish()
     })
   ).nullish(),
-  retrievalMetadata:
-
-    webDynamicRetrievalScore:
+  retrievalMetadata: import_zod5.z.union([
+    import_zod5.z.object({
+      webDynamicRetrievalScore: import_zod5.z.number()
    }),
-
+    import_zod5.z.object({})
  ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = import_zod5.z.object({
+  category: import_zod5.z.string().nullish(),
+  probability: import_zod5.z.string().nullish(),
+  probabilityScore: import_zod5.z.number().nullish(),
+  severity: import_zod5.z.string().nullish(),
+  severityScore: import_zod5.z.number().nullish(),
+  blocked: import_zod5.z.boolean().nullish()
+});
+var usageSchema = import_zod5.z.object({
+  cachedContentTokenCount: import_zod5.z.number().nullish(),
+  thoughtsTokenCount: import_zod5.z.number().nullish(),
+  promptTokenCount: import_zod5.z.number().nullish(),
+  candidatesTokenCount: import_zod5.z.number().nullish(),
+  totalTokenCount: import_zod5.z.number().nullish()
 });
-var responseSchema =
-  candidates:
-
-      content: contentSchema.nullish().or(
-      finishReason:
-      safetyRatings:
+var responseSchema = import_zod5.z.object({
+  candidates: import_zod5.z.array(
+    import_zod5.z.object({
+      content: contentSchema.nullish().or(import_zod5.z.object({}).strict()),
+      finishReason: import_zod5.z.string().nullish(),
+      safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata:
-    promptTokenCount: import_zod3.z.number().nullish(),
-    candidatesTokenCount: import_zod3.z.number().nullish(),
-    totalTokenCount: import_zod3.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = import_zod5.z.object({
+  candidates: import_zod5.z.array(
+    import_zod5.z.object({
       content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: import_zod5.z.string().nullish(),
+      safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata:
-    promptTokenCount: import_zod3.z.number().nullish(),
-    candidatesTokenCount: import_zod3.z.number().nullish(),
-    totalTokenCount: import_zod3.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var googleGenerativeAIProviderOptionsSchema = import_zod3.z.object({
-  responseModalities: import_zod3.z.array(import_zod3.z.enum(["TEXT", "IMAGE"])).nullish()
-});
-
-// src/google-supported-file-url.ts
-function isSupportedFileUrl(url) {
-  return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
-}
 
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
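The zod schemas above validate the grounding chunks, grounding supports, and safety ratings returned by the API; the language model forwards them unchanged as `providerMetadata.google`. A sketch of reading them from a grounded call, assuming the AI SDK v5 canary `generateText` result exposes `providerMetadata`; the model id is illustrative:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'Who won the most recent Nobel Prize in Physics?',
  providerOptions: { google: { useSearchGrounding: true } },
});

// groundingMetadata and safetyRatings follow the schemas in this file.
const metadata = result.providerMetadata?.google;
console.log(metadata?.groundingMetadata);
console.log(metadata?.safetyRatings);
```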
@@ -923,30 +1001,35 @@ function createGoogleGenerativeAI(options = {}) {
     }),
     ...options.headers
   });
-  const createChatModel = (modelId
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId,
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
       generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils5.generateId,
-
+      supportedUrls: () => ({
+        "*": [
+          // HTTP URLs:
+          /^https?:\/\/.*$/
+        ]
+      }),
       fetch: options.fetch
     });
   };
-  const createEmbeddingModel = (modelId
+  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
     provider: "google.generative-ai",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Google Generative AI model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;