@ai-sdk/google 2.0.0-canary.9 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +559 -0
- package/README.md +2 -2
- package/dist/index.d.mts +157 -267
- package/dist/index.d.ts +157 -267
- package/dist/index.js +732 -220
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +726 -210
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +47 -254
- package/dist/internal/index.d.ts +47 -254
- package/dist/internal/index.js +529 -191
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +516 -179
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +11 -9
package/dist/index.js
CHANGED
@@ -26,22 +26,21 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/google-provider.ts
-var
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_provider_utils9 = require("@ai-sdk/provider-utils");

 // src/google-generative-ai-embedding-model.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var
+var import_v43 = require("zod/v4");

 // src/google-error.ts
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var
-var googleErrorDataSchema =
-error:
-code:
-message:
-status:
+var import_v4 = require("zod/v4");
+var googleErrorDataSchema = import_v4.z.object({
+error: import_v4.z.object({
+code: import_v4.z.number().nullable(),
+message: import_v4.z.string(),
+status: import_v4.z.string()
 })
 });
 var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -49,28 +48,61 @@ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
 errorToMessage: (data) => data.error.message
 });

+// src/google-generative-ai-embedding-options.ts
+var import_v42 = require("zod/v4");
+var googleGenerativeAIEmbeddingProviderOptions = import_v42.z.object({
+/**
+* Optional. Optional reduced dimension for the output embedding.
+* If set, excessive values in the output embedding are truncated from the end.
+*/
+outputDimensionality: import_v42.z.number().optional(),
+/**
+* Optional. Specifies the task type for generating embeddings.
+* Supported task types:
+* - SEMANTIC_SIMILARITY: Optimized for text similarity.
+* - CLASSIFICATION: Optimized for text classification.
+* - CLUSTERING: Optimized for clustering texts based on similarity.
+* - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+* - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+* - QUESTION_ANSWERING: Optimized for answering questions.
+* - FACT_VERIFICATION: Optimized for verifying factual information.
+* - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+*/
+taskType: import_v42.z.enum([
+"SEMANTIC_SIMILARITY",
+"CLASSIFICATION",
+"CLUSTERING",
+"RETRIEVAL_DOCUMENT",
+"RETRIEVAL_QUERY",
+"QUESTION_ANSWERING",
+"FACT_VERIFICATION",
+"CODE_RETRIEVAL_QUERY"
+]).optional()
+});
+
 // src/google-generative-ai-embedding-model.ts
 var GoogleGenerativeAIEmbeddingModel = class {
-constructor(modelId,
+constructor(modelId, config) {
 this.specificationVersion = "v2";
+this.maxEmbeddingsPerCall = 2048;
+this.supportsParallelCalls = true;
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
 }
 get provider() {
 return this.config.provider;
 }
-get maxEmbeddingsPerCall() {
-return 2048;
-}
-get supportsParallelCalls() {
-return true;
-}
 async doEmbed({
 values,
 headers,
-abortSignal
+abortSignal,
+providerOptions
 }) {
+const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
+provider: "google",
+providerOptions,
+schema: googleGenerativeAIEmbeddingProviderOptions
+});
 if (values.length > this.maxEmbeddingsPerCall) {
 throw new import_provider.TooManyEmbeddingValuesForCallError({
 provider: this.provider,
@@ -83,6 +115,35 @@ var GoogleGenerativeAIEmbeddingModel = class {
 await (0, import_provider_utils2.resolve)(this.config.headers),
 headers
 );
+if (values.length === 1) {
+const {
+responseHeaders: responseHeaders2,
+value: response2,
+rawValue: rawValue2
+} = await (0, import_provider_utils2.postJsonToApi)({
+url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
+headers: mergedHeaders,
+body: {
+model: `models/${this.modelId}`,
+content: {
+parts: [{ text: values[0] }]
+},
+outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+taskType: googleOptions == null ? void 0 : googleOptions.taskType
+},
+failedResponseHandler: googleFailedResponseHandler,
+successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+googleGenerativeAISingleEmbeddingResponseSchema
+),
+abortSignal,
+fetch: this.config.fetch
+});
+return {
+embeddings: [response2.embedding.values],
+usage: void 0,
+response: { headers: responseHeaders2, body: rawValue2 }
+};
+}
 const {
 responseHeaders,
 value: response,
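The embedding changes above replace constructor settings with per-call provider options and add a dedicated `:embedContent` path for single values. A minimal usage sketch, assuming the AI SDK v5 `embed` helper; the `text-embedding-004` model id is an assumption, not part of this diff:

```ts
import { google } from '@ai-sdk/google';
import { embed } from 'ai';

const { embedding } = await embed({
  model: google.textEmbedding('text-embedding-004'), // assumed model id
  value: 'sunny day at the beach',
  providerOptions: {
    google: {
      // validated by googleGenerativeAIEmbeddingProviderOptions above
      taskType: 'SEMANTIC_SIMILARITY',
      outputDimensionality: 512,
    },
  },
});
```

With a single value, the provider now posts to `:embedContent` instead of the batch endpoint, as added in this hunk.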
@@ -94,7 +155,8 @@ var GoogleGenerativeAIEmbeddingModel = class {
 requests: values.map((value) => ({
 model: `models/${this.modelId}`,
 content: { role: "user", parts: [{ text: value }] },
-outputDimensionality:
+outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+taskType: googleOptions == null ? void 0 : googleOptions.taskType
 }))
 },
 failedResponseHandler: googleFailedResponseHandler,
@@ -111,13 +173,16 @@ var GoogleGenerativeAIEmbeddingModel = class {
 };
 }
 };
-var googleGenerativeAITextEmbeddingResponseSchema =
-embeddings:
+var googleGenerativeAITextEmbeddingResponseSchema = import_v43.z.object({
+embeddings: import_v43.z.array(import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) }))
+});
+var googleGenerativeAISingleEmbeddingResponseSchema = import_v43.z.object({
+embedding: import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) })
 });

 // src/google-generative-ai-language-model.ts
-var
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var import_v47 = require("zod/v4");

 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -213,16 +278,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
 return result;
 }
 function isEmptyObjectSchema(jsonSchema) {
-return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
 }

 // src/convert-to-google-generative-ai-messages.ts
 var import_provider2 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-function convertToGoogleGenerativeAIMessages(prompt) {
+function convertToGoogleGenerativeAIMessages(prompt, options) {
+var _a;
 const systemInstructionParts = [];
 const contents = [];
 let systemMessagesAllowed = true;
+const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
 for (const { role, content } of prompt) {
 switch (role) {
 case "system": {
@@ -296,7 +363,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 return {
 functionCall: {
 name: part.toolName,
-args: part.
+args: part.input
 }
 };
 }
@@ -314,7 +381,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 name: part.toolName,
 response: {
 name: part.toolName,
-content: part.
+content: part.output.value
 }
 }
 }))
@@ -323,8 +390,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 }
 }
 }
+if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+contents[0].parts.unshift({ text: systemText + "\n\n" });
+}
 return {
-systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
 contents
 };
 }
@@ -334,13 +405,73 @@ function getModelPath(modelId) {
 return modelId.includes("/") ? modelId : `models/${modelId}`;
 }

+// src/google-generative-ai-options.ts
+var import_v44 = require("zod/v4");
+var googleGenerativeAIProviderOptions = import_v44.z.object({
+responseModalities: import_v44.z.array(import_v44.z.enum(["TEXT", "IMAGE"])).optional(),
+thinkingConfig: import_v44.z.object({
+thinkingBudget: import_v44.z.number().optional(),
+includeThoughts: import_v44.z.boolean().optional()
+}).optional(),
+/**
+Optional.
+The name of the cached content used as context to serve the prediction.
+Format: cachedContents/{cachedContent}
+*/
+cachedContent: import_v44.z.string().optional(),
+/**
+* Optional. Enable structured output. Default is true.
+*
+* This is useful when the JSON Schema contains elements that are
+* not supported by the OpenAPI schema version that
+* Google Generative AI uses. You can use this to disable
+* structured outputs if you need to.
+*/
+structuredOutputs: import_v44.z.boolean().optional(),
+/**
+Optional. A list of unique safety settings for blocking unsafe content.
+*/
+safetySettings: import_v44.z.array(
+import_v44.z.object({
+category: import_v44.z.enum([
+"HARM_CATEGORY_UNSPECIFIED",
+"HARM_CATEGORY_HATE_SPEECH",
+"HARM_CATEGORY_DANGEROUS_CONTENT",
+"HARM_CATEGORY_HARASSMENT",
+"HARM_CATEGORY_SEXUALLY_EXPLICIT",
+"HARM_CATEGORY_CIVIC_INTEGRITY"
+]),
+threshold: import_v44.z.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+])
+})
+).optional(),
+threshold: import_v44.z.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+]).optional(),
+/**
+* Optional. Enables timestamp understanding for audio-only files.
+*
+* https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+*/
+audioTimestamp: import_v44.z.boolean().optional()
+});
+
 // src/google-prepare-tools.ts
 var import_provider3 = require("@ai-sdk/provider");
 function prepareTools({
 tools,
 toolChoice,
-useSearchGrounding,
-dynamicRetrievalConfig,
 modelId
 }) {
 var _a;
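The `googleGenerativeAIProviderOptions` schema added above replaces the old model settings object; `thinkingConfig`, `safetySettings`, `cachedContent`, `structuredOutputs`, and `audioTimestamp` now travel per call via `providerOptions.google`. A hedged sketch (model id and budget value are illustrative assumptions):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.5-flash'), // assumed model id
  prompt: 'Summarize this transcript in three bullet points.',
  providerOptions: {
    google: {
      thinkingConfig: { thinkingBudget: 2048 },
      safetySettings: [
        { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH' },
      ],
    },
  },
});
```

Note that `thinkingConfig.includeThoughts` triggers a warning on the non-Vertex provider, as seen in the `getArgs` hunk further down.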
@@ -348,28 +479,87 @@ function prepareTools({
 const toolWarnings = [];
 const isGemini2 = modelId.includes("gemini-2");
 const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
-if (
+if (tools == null) {
+return { tools: void 0, toolConfig: void 0, toolWarnings };
+}
+const hasFunctionTools = tools.some((tool) => tool.type === "function");
+const hasProviderDefinedTools = tools.some(
+(tool) => tool.type === "provider-defined"
+);
+if (hasFunctionTools && hasProviderDefinedTools) {
+toolWarnings.push({
+type: "unsupported-tool",
+tool: tools.find((tool) => tool.type === "function"),
+details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+});
+}
+if (hasProviderDefinedTools) {
+const googleTools2 = {};
+const providerDefinedTools = tools.filter(
+(tool) => tool.type === "provider-defined"
+);
+providerDefinedTools.forEach((tool) => {
+switch (tool.id) {
+case "google.google_search":
+if (isGemini2) {
+googleTools2.googleSearch = {};
+} else if (supportsDynamicRetrieval) {
+googleTools2.googleSearchRetrieval = {
+dynamicRetrievalConfig: {
+mode: tool.args.mode,
+dynamicThreshold: tool.args.dynamicThreshold
+}
+};
+} else {
+googleTools2.googleSearchRetrieval = {};
+}
+break;
+case "google.url_context":
+if (isGemini2) {
+googleTools2.urlContext = {};
+} else {
+toolWarnings.push({
+type: "unsupported-tool",
+tool,
+details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+});
+}
+break;
+case "google.code_execution":
+if (isGemini2) {
+googleTools2.codeExecution = {};
+} else {
+toolWarnings.push({
+type: "unsupported-tool",
+tool,
+details: "The code execution tools is not supported with other Gemini models than Gemini 2."
+});
+}
+break;
+default:
+toolWarnings.push({ type: "unsupported-tool", tool });
+break;
+}
+});
 return {
-tools:
-googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
-},
+tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
 toolConfig: void 0,
 toolWarnings
 };
 }
-if (tools == null) {
-return { tools: void 0, toolConfig: void 0, toolWarnings };
-}
 const functionDeclarations = [];
 for (const tool of tools) {
-
-
-
-
-
-
-
-
+switch (tool.type) {
+case "function":
+functionDeclarations.push({
+name: tool.name,
+description: (_a = tool.description) != null ? _a : "",
+parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+});
+break;
+default:
+toolWarnings.push({ type: "unsupported-tool", tool });
+break;
 }
 }
 if (toolChoice == null) {
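`prepareTools` now understands provider-defined tools instead of the removed `useSearchGrounding`/`dynamicRetrievalConfig` settings. A sketch of enabling search grounding; per the tool factory in the next hunk, the tool key must be `google_search` (the model id is an assumption):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { text, sources } = await generateText({
  model: google('gemini-2.0-flash'), // assumed model id
  prompt: 'Who won the most recent Formula 1 race?',
  tools: {
    // mapped to googleSearch (Gemini 2) or googleSearchRetrieval (Gemini 1.5)
    google_search: google.tools.googleSearch({}),
  },
});
```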
@@ -446,23 +636,80 @@ function mapGoogleGenerativeAIFinishReason({
 }
 }

+// src/tool/google-search.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_v45 = require("zod/v4");
+var groundingChunkSchema = import_v45.z.object({
+web: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish(),
+retrievedContext: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish()
+});
+var groundingMetadataSchema = import_v45.z.object({
+webSearchQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+retrievalQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+searchEntryPoint: import_v45.z.object({ renderedContent: import_v45.z.string() }).nullish(),
+groundingChunks: import_v45.z.array(groundingChunkSchema).nullish(),
+groundingSupports: import_v45.z.array(
+import_v45.z.object({
+segment: import_v45.z.object({
+startIndex: import_v45.z.number().nullish(),
+endIndex: import_v45.z.number().nullish(),
+text: import_v45.z.string().nullish()
+}),
+segment_text: import_v45.z.string().nullish(),
+groundingChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+supportChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+confidenceScores: import_v45.z.array(import_v45.z.number()).nullish(),
+confidenceScore: import_v45.z.array(import_v45.z.number()).nullish()
+})
+).nullish(),
+retrievalMetadata: import_v45.z.union([
+import_v45.z.object({
+webDynamicRetrievalScore: import_v45.z.number()
+}),
+import_v45.z.object({})
+]).nullish()
+});
+var googleSearch = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+id: "google.google_search",
+name: "google_search",
+inputSchema: import_v45.z.object({
+mode: import_v45.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+dynamicThreshold: import_v45.z.number().default(1)
+})
+});
+
+// src/tool/url-context.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_v46 = require("zod/v4");
+var urlMetadataSchema = import_v46.z.object({
+retrievedUrl: import_v46.z.string(),
+urlRetrievalStatus: import_v46.z.string()
+});
+var urlContextMetadataSchema = import_v46.z.object({
+urlMetadata: import_v46.z.array(urlMetadataSchema)
+});
+var urlContext = (0, import_provider_utils5.createProviderDefinedToolFactory)({
+id: "google.url_context",
+name: "url_context",
+inputSchema: import_v46.z.object({})
+});
+
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-constructor(modelId,
+constructor(modelId, config) {
 this.specificationVersion = "v2";
-
-this.supportsImageUrls = false;
+var _a;
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
-
-get supportsStructuredOutputs() {
-var _a;
-return (_a = this.settings.structuredOutputs) != null ? _a : true;
+this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils6.generateId;
 }
 get provider() {
 return this.config.provider;
 }
+get supportedUrls() {
+var _a, _b, _c;
+return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+}
 async getArgs({
 prompt,
 maxOutputTokens,
@@ -478,23 +725,31 @@ var GoogleGenerativeAILanguageModel = class {
 toolChoice,
 providerOptions
 }) {
-var _a;
+var _a, _b;
 const warnings = [];
-const googleOptions = (0,
+const googleOptions = await (0, import_provider_utils6.parseProviderOptions)({
 provider: "google",
 providerOptions,
-schema:
+schema: googleGenerativeAIProviderOptions
 });
-
+if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+warnings.push({
+type: "other",
+message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+});
+}
+const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+prompt,
+{ isGemmaModel }
+);
 const {
-tools:
+tools: googleTools2,
 toolConfig: googleToolConfig,
 toolWarnings
 } = prepareTools({
 tools,
 toolChoice,
-useSearchGrounding: (_a = this.settings.useSearchGrounding) != null ? _a : false,
-dynamicRetrievalConfig: this.settings.dynamicRetrievalConfig,
 modelId: this.modelId
 });
 return {
@@ -513,62 +768,89 @@ var GoogleGenerativeAILanguageModel = class {
 responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
 responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
 // so this is needed as an escape hatch:
-
-
-
+// TODO convert into provider option
+((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+audioTimestamp: googleOptions.audioTimestamp
 },
 // provider options:
-responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities
+responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
 },
 contents,
-systemInstruction,
-safetySettings:
-tools:
+systemInstruction: isGemmaModel ? void 0 : systemInstruction,
+safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+tools: googleTools2,
 toolConfig: googleToolConfig,
-cachedContent:
+cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
 },
 warnings: [...warnings, ...toolWarnings]
 };
 }
-supportsUrl(url) {
-return this.config.isSupportedUrl(url);
-}
 async doGenerate(options) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
 const { args, warnings } = await this.getArgs(options);
 const body = JSON.stringify(args);
-const mergedHeaders = (0,
-await (0,
+const mergedHeaders = (0, import_provider_utils6.combineHeaders)(
+await (0, import_provider_utils6.resolve)(this.config.headers),
 options.headers
 );
 const {
 responseHeaders,
 value: response,
 rawValue: rawResponse
-} = await (0,
+} = await (0, import_provider_utils6.postJsonToApi)({
 url: `${this.config.baseURL}/${getModelPath(
 this.modelId
 )}:generateContent`,
 headers: mergedHeaders,
 body: args,
 failedResponseHandler: googleFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(responseSchema),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
 const candidate = response.candidates[0];
 const content = [];
-const parts =
+const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
+const usageMetadata = response.usageMetadata;
+let lastCodeExecutionToolCallId;
 for (const part of parts) {
-if ("
-
+if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
+const toolCallId = this.config.generateId();
+lastCodeExecutionToolCallId = toolCallId;
+content.push({
+type: "tool-call",
+toolCallId,
+toolName: "code_execution",
+input: JSON.stringify(part.executableCode),
+providerExecuted: true
+});
+} else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+content.push({
+type: "tool-result",
+// Assumes a result directly follows its corresponding call part.
+toolCallId: lastCodeExecutionToolCallId,
+toolName: "code_execution",
+result: {
+outcome: part.codeExecutionResult.outcome,
+output: part.codeExecutionResult.output
+},
+providerExecuted: true
+});
+lastCodeExecutionToolCallId = void 0;
+} else if ("text" in part && part.text != null && part.text.length > 0) {
+if (part.thought === true) {
+content.push({ type: "reasoning", text: part.text });
+} else {
+content.push({ type: "text", text: part.text });
+}
 } else if ("functionCall" in part) {
 content.push({
 type: "tool-call",
-toolCallType: "function",
 toolCallId: this.config.generateId(),
 toolName: part.functionCall.name,
-
+input: JSON.stringify(part.functionCall.args)
 });
 } else if ("inlineData" in part) {
 content.push({
@@ -578,14 +860,13 @@ var GoogleGenerativeAILanguageModel = class {
 });
 }
 }
-const sources = (
+const sources = (_d = extractSources({
 groundingMetadata: candidate.groundingMetadata,
 generateId: this.config.generateId
-})) != null ?
+})) != null ? _d : [];
 for (const source of sources) {
 content.push(source);
 }
-const usageMetadata = response.usageMetadata;
 return {
 content,
 finishReason: mapGoogleGenerativeAIFinishReason({
@@ -593,14 +874,19 @@ var GoogleGenerativeAILanguageModel = class {
 hasToolCalls: content.some((part) => part.type === "tool-call")
 }),
 usage: {
-inputTokens: (
-outputTokens: (
+inputTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _e : void 0,
+outputTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _f : void 0,
+totalTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _g : void 0,
+reasoningTokens: (_h = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _h : void 0,
+cachedInputTokens: (_i = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _i : void 0
 },
 warnings,
 providerMetadata: {
 google: {
-groundingMetadata: (
-
+groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+usageMetadata: usageMetadata != null ? usageMetadata : null
 }
 },
 request: { body },
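`doGenerate` now reports reasoning and cached tokens and exposes grounding, URL-context, and usage metadata under `providerMetadata.google`. A sketch of reading these fields from a `generateText` result (field names follow the hunk above; all of them may be null):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'), // assumed model id
  prompt: 'Explain the Doppler effect in two sentences.',
});

const meta = result.providerMetadata?.google;
console.log(meta?.groundingMetadata);  // search grounding, if any
console.log(meta?.urlContextMetadata); // URL context tool results, if any
console.log(meta?.usageMetadata);      // raw Gemini usage block
console.log(result.usage);             // now includes totalTokens, reasoningTokens, cachedInputTokens
```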
@@ -614,29 +900,35 @@ var GoogleGenerativeAILanguageModel = class {
 async doStream(options) {
 const { args, warnings } = await this.getArgs(options);
 const body = JSON.stringify(args);
-const headers = (0,
-await (0,
+const headers = (0, import_provider_utils6.combineHeaders)(
+await (0, import_provider_utils6.resolve)(this.config.headers),
 options.headers
 );
-const { responseHeaders, value: response } = await (0,
+const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
 url: `${this.config.baseURL}/${getModelPath(
 this.modelId
 )}:streamGenerateContent?alt=sse`,
 headers,
 body: args,
 failedResponseHandler: googleFailedResponseHandler,
-successfulResponseHandler: (0,
+successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(chunkSchema),
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
 let finishReason = "unknown";
 const usage = {
 inputTokens: void 0,
-outputTokens: void 0
+outputTokens: void 0,
+totalTokens: void 0
 };
 let providerMetadata = void 0;
-const
+const generateId3 = this.config.generateId;
 let hasToolCalls = false;
+let currentTextBlockId = null;
+let currentReasoningBlockId = null;
+let blockCounter = 0;
+const emittedSourceUrls = /* @__PURE__ */ new Set();
+let lastCodeExecutionToolCallId;
 return {
 stream: response.pipeThrough(
 new TransformStream({
@@ -644,7 +936,10 @@ var GoogleGenerativeAILanguageModel = class {
 controller.enqueue({ type: "stream-start", warnings });
 },
 transform(chunk, controller) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+if (options.includeRawChunks) {
+controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+}
 if (!chunk.success) {
 controller.enqueue({ type: "error", error: chunk.error });
 return;
@@ -654,16 +949,99 @@ var GoogleGenerativeAILanguageModel = class {
 if (usageMetadata != null) {
 usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
 usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
 }
-const candidate = (
+const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
 if (candidate == null) {
 return;
 }
 const content = candidate.content;
+const sources = extractSources({
+groundingMetadata: candidate.groundingMetadata,
+generateId: generateId3
+});
+if (sources != null) {
+for (const source of sources) {
+if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+emittedSourceUrls.add(source.url);
+controller.enqueue(source);
+}
+}
+}
 if (content != null) {
-const
-
-
+const parts = (_g = content.parts) != null ? _g : [];
+for (const part of parts) {
+if ("executableCode" in part && ((_h = part.executableCode) == null ? void 0 : _h.code)) {
+const toolCallId = generateId3();
+lastCodeExecutionToolCallId = toolCallId;
+controller.enqueue({
+type: "tool-call",
+toolCallId,
+toolName: "code_execution",
+input: JSON.stringify(part.executableCode),
+providerExecuted: true
+});
+hasToolCalls = true;
+} else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+const toolCallId = lastCodeExecutionToolCallId;
+if (toolCallId) {
+controller.enqueue({
+type: "tool-result",
+toolCallId,
+toolName: "code_execution",
+result: {
+outcome: part.codeExecutionResult.outcome,
+output: part.codeExecutionResult.output
+},
+providerExecuted: true
+});
+lastCodeExecutionToolCallId = void 0;
+}
+} else if ("text" in part && part.text != null && part.text.length > 0) {
+if (part.thought === true) {
+if (currentTextBlockId !== null) {
+controller.enqueue({
+type: "text-end",
+id: currentTextBlockId
+});
+currentTextBlockId = null;
+}
+if (currentReasoningBlockId === null) {
+currentReasoningBlockId = String(blockCounter++);
+controller.enqueue({
+type: "reasoning-start",
+id: currentReasoningBlockId
+});
+}
+controller.enqueue({
+type: "reasoning-delta",
+id: currentReasoningBlockId,
+delta: part.text
+});
+} else {
+if (currentReasoningBlockId !== null) {
+controller.enqueue({
+type: "reasoning-end",
+id: currentReasoningBlockId
+});
+currentReasoningBlockId = null;
+}
+if (currentTextBlockId === null) {
+currentTextBlockId = String(blockCounter++);
+controller.enqueue({
+type: "text-start",
+id: currentTextBlockId
+});
+}
+controller.enqueue({
+type: "text-delta",
+id: currentTextBlockId,
+delta: part.text
+});
+}
+}
+}
 }
 const inlineDataParts = getInlineDataParts(content.parts);
 if (inlineDataParts != null) {
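Streaming now emits explicit `text-start`/`text-delta`/`text-end` and `reasoning-*` blocks with ids instead of a single concatenated text part, and deduplicates grounding sources as they arrive. A minimal consumption sketch via `streamText`; reasoning blocks surface on `result.fullStream`, while `textStream` stays plain text:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

const result = streamText({
  model: google('gemini-2.5-flash'), // assumed model id
  prompt: 'Think step by step: what is 17 * 24?',
});

// Plain text deltas; reasoning parts are available on result.fullStream.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```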
@@ -677,23 +1055,29 @@ var GoogleGenerativeAILanguageModel = class {
 }
 const toolCallDeltas = getToolCallsFromParts({
 parts: content.parts,
-generateId:
+generateId: generateId3
 });
 if (toolCallDeltas != null) {
 for (const toolCall of toolCallDeltas) {
 controller.enqueue({
-type: "tool-
-
-
-
-
+type: "tool-input-start",
+id: toolCall.toolCallId,
+toolName: toolCall.toolName
+});
+controller.enqueue({
+type: "tool-input-delta",
+id: toolCall.toolCallId,
+delta: toolCall.args
+});
+controller.enqueue({
+type: "tool-input-end",
+id: toolCall.toolCallId
 });
 controller.enqueue({
 type: "tool-call",
-toolCallType: "function",
 toolCallId: toolCall.toolCallId,
 toolName: toolCall.toolName,
-
+input: toolCall.args
 });
 hasToolCalls = true;
 }
@@ -704,22 +1088,31 @@ var GoogleGenerativeAILanguageModel = class {
 finishReason: candidate.finishReason,
 hasToolCalls
 });
-const sources = (_d = extractSources({
-groundingMetadata: candidate.groundingMetadata,
-generateId: generateId2
-})) != null ? _d : [];
-for (const source of sources) {
-controller.enqueue(source);
-}
 providerMetadata = {
 google: {
-groundingMetadata: (
-
+groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
+safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
 }
 };
+if (usageMetadata != null) {
+providerMetadata.google.usageMetadata = usageMetadata;
+}
 }
 },
 flush(controller) {
+if (currentTextBlockId !== null) {
+controller.enqueue({
+type: "text-end",
+id: currentTextBlockId
+});
+}
+if (currentReasoningBlockId !== null) {
+controller.enqueue({
+type: "reasoning-end",
+id: currentReasoningBlockId
+});
+}
 controller.enqueue({
 type: "finish",
 finishReason,
@@ -736,26 +1129,18 @@ var GoogleGenerativeAILanguageModel = class {
 };
 function getToolCallsFromParts({
 parts,
-generateId:
+generateId: generateId3
 }) {
 const functionCallParts = parts == null ? void 0 : parts.filter(
 (part) => "functionCall" in part
 );
 return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
 type: "tool-call",
-
-toolCallId: generateId2(),
+toolCallId: generateId3(),
 toolName: part.functionCall.name,
 args: JSON.stringify(part.functionCall.args)
 }));
 }
-function getTextFromParts(parts) {
-const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-return textParts == null || textParts.length === 0 ? void 0 : {
-type: "text",
-text: textParts.map((part) => part.text).join("")
-};
-}
 function getInlineDataParts(parts) {
 return parts == null ? void 0 : parts.filter(
 (part) => "inlineData" in part
@@ -763,7 +1148,7 @@ function getInlineDataParts(parts) {
 }
 function extractSources({
 groundingMetadata,
-generateId:
+generateId: generateId3
 }) {
 var _a;
 return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -771,146 +1156,273 @@ function extractSources({
 ).map((chunk) => ({
 type: "source",
 sourceType: "url",
-id:
+id: generateId3(),
 url: chunk.web.uri,
 title: chunk.web.title
 }));
 }
-var contentSchema =
-
-
-
-
-
-
-
-functionCall: import_zod3.z.object({
-name: import_zod3.z.string(),
-args: import_zod3.z.unknown()
+var contentSchema = import_v47.z.object({
+parts: import_v47.z.array(
+import_v47.z.union([
+// note: order matters since text can be fully empty
+import_v47.z.object({
+functionCall: import_v47.z.object({
+name: import_v47.z.string(),
+args: import_v47.z.unknown()
 })
 }),
-
-inlineData:
-mimeType:
-data:
+import_v47.z.object({
+inlineData: import_v47.z.object({
+mimeType: import_v47.z.string(),
+data: import_v47.z.string()
 })
+}),
+import_v47.z.object({
+executableCode: import_v47.z.object({
+language: import_v47.z.string(),
+code: import_v47.z.string()
+}).nullish(),
+codeExecutionResult: import_v47.z.object({
+outcome: import_v47.z.string(),
+output: import_v47.z.string()
+}).nullish(),
+text: import_v47.z.string().nullish(),
+thought: import_v47.z.boolean().nullish()
 })
 ])
 ).nullish()
 });
-var
-
-
+var safetyRatingSchema = import_v47.z.object({
+category: import_v47.z.string().nullish(),
+probability: import_v47.z.string().nullish(),
+probabilityScore: import_v47.z.number().nullish(),
+severity: import_v47.z.string().nullish(),
+severityScore: import_v47.z.number().nullish(),
+blocked: import_v47.z.boolean().nullish()
 });
-var
-
-
-
-
-
-import_zod3.z.object({
-segment: import_zod3.z.object({
-startIndex: import_zod3.z.number().nullish(),
-endIndex: import_zod3.z.number().nullish(),
-text: import_zod3.z.string().nullish()
-}),
-segment_text: import_zod3.z.string().nullish(),
-groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
-supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
-confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
-confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
-})
-).nullish(),
-retrievalMetadata: import_zod3.z.union([
-import_zod3.z.object({
-webDynamicRetrievalScore: import_zod3.z.number()
-}),
-import_zod3.z.object({})
-]).nullish()
-});
-var safetyRatingSchema = import_zod3.z.object({
-category: import_zod3.z.string(),
-probability: import_zod3.z.string(),
-probabilityScore: import_zod3.z.number().nullish(),
-severity: import_zod3.z.string().nullish(),
-severityScore: import_zod3.z.number().nullish(),
-blocked: import_zod3.z.boolean().nullish()
+var usageSchema = import_v47.z.object({
+cachedContentTokenCount: import_v47.z.number().nullish(),
+thoughtsTokenCount: import_v47.z.number().nullish(),
+promptTokenCount: import_v47.z.number().nullish(),
+candidatesTokenCount: import_v47.z.number().nullish(),
+totalTokenCount: import_v47.z.number().nullish()
 });
-var responseSchema =
-candidates:
-
-content: contentSchema.nullish().or(
-finishReason:
-safetyRatings:
-groundingMetadata: groundingMetadataSchema.nullish()
+var responseSchema = import_v47.z.object({
+candidates: import_v47.z.array(
+import_v47.z.object({
+content: contentSchema.nullish().or(import_v47.z.object({}).strict()),
+finishReason: import_v47.z.string().nullish(),
+safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+groundingMetadata: groundingMetadataSchema.nullish(),
+urlContextMetadata: urlContextMetadataSchema.nullish()
 })
 ),
-usageMetadata:
-promptTokenCount: import_zod3.z.number().nullish(),
-candidatesTokenCount: import_zod3.z.number().nullish(),
-totalTokenCount: import_zod3.z.number().nullish()
-}).nullish()
+usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-candidates:
-
+var chunkSchema = import_v47.z.object({
+candidates: import_v47.z.array(
+import_v47.z.object({
 content: contentSchema.nullish(),
-finishReason:
-safetyRatings:
-groundingMetadata: groundingMetadataSchema.nullish()
+finishReason: import_v47.z.string().nullish(),
+safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+groundingMetadata: groundingMetadataSchema.nullish(),
+urlContextMetadata: urlContextMetadataSchema.nullish()
 })
 ).nullish(),
-usageMetadata:
-promptTokenCount: import_zod3.z.number().nullish(),
-candidatesTokenCount: import_zod3.z.number().nullish(),
-totalTokenCount: import_zod3.z.number().nullish()
-}).nullish()
+usageMetadata: usageSchema.nullish()
 });
-
-
+
+// src/tool/code-execution.ts
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_v48 = require("zod/v4");
+var codeExecution = (0, import_provider_utils7.createProviderDefinedToolFactoryWithOutputSchema)({
+id: "google.code_execution",
+name: "code_execution",
+inputSchema: import_v48.z.object({
+language: import_v48.z.string().describe("The programming language of the code."),
+code: import_v48.z.string().describe("The code to be executed.")
+}),
+outputSchema: import_v48.z.object({
+outcome: import_v48.z.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
+output: import_v48.z.string().describe("The output from the code execution.")
+})
 });

-// src/google-
-
-
-
+// src/google-tools.ts
+var googleTools = {
+/**
+* Creates a Google search tool that gives Google direct access to real-time web content.
+* Must have name "google_search".
+*/
+googleSearch,
+/**
+* Creates a URL context tool that gives Google direct access to real-time web content.
+* Must have name "url_context".
+*/
+urlContext,
+/**
+* A tool that enables the model to generate and run Python code.
+* Must have name "code_execution".
+*
+* @note Ensure the selected model supports Code Execution.
+* Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
+*
+* @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
+* @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
+*/
+codeExecution
+};
+
+// src/google-generative-ai-image-model.ts
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+var import_v49 = require("zod/v4");
+var GoogleGenerativeAIImageModel = class {
+constructor(modelId, settings, config) {
+this.modelId = modelId;
+this.settings = settings;
+this.config = config;
+this.specificationVersion = "v2";
+}
+get maxImagesPerCall() {
+var _a;
+return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
+}
+get provider() {
+return this.config.provider;
+}
+async doGenerate(options) {
+var _a, _b, _c;
+const {
+prompt,
+n = 1,
+size = "1024x1024",
+aspectRatio = "1:1",
+seed,
+providerOptions,
+headers,
+abortSignal
+} = options;
+const warnings = [];
+if (size != null) {
+warnings.push({
+type: "unsupported-setting",
+setting: "size",
+details: "This model does not support the `size` option. Use `aspectRatio` instead."
+});
+}
+if (seed != null) {
+warnings.push({
+type: "unsupported-setting",
+setting: "seed",
+details: "This model does not support the `seed` option through this provider."
+});
+}
+const googleOptions = await (0, import_provider_utils8.parseProviderOptions)({
+provider: "google",
+providerOptions,
+schema: googleImageProviderOptionsSchema
+});
+const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+const parameters = {
+sampleCount: n
+};
+if (aspectRatio != null) {
+parameters.aspectRatio = aspectRatio;
+}
+if (googleOptions) {
+Object.assign(parameters, googleOptions);
+}
+const body = {
+instances: [{ prompt }],
+parameters
+};
+const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+url: `${this.config.baseURL}/models/${this.modelId}:predict`,
+headers: (0, import_provider_utils8.combineHeaders)(await (0, import_provider_utils8.resolve)(this.config.headers), headers),
+body,
+failedResponseHandler: googleFailedResponseHandler,
+successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+googleImageResponseSchema
+),
+abortSignal,
+fetch: this.config.fetch
+});
+return {
+images: response.predictions.map(
+(p) => p.bytesBase64Encoded
+),
+warnings: warnings != null ? warnings : [],
+providerMetadata: {
+google: {
+images: response.predictions.map((prediction) => ({
+// Add any prediction-specific metadata here
+}))
+}
+},
+response: {
+timestamp: currentDate,
+modelId: this.modelId,
+headers: responseHeaders
+}
+};
+}
+};
+var googleImageResponseSchema = import_v49.z.object({
+predictions: import_v49.z.array(import_v49.z.object({ bytesBase64Encoded: import_v49.z.string() })).default([])
+});
+var googleImageProviderOptionsSchema = import_v49.z.object({
+personGeneration: import_v49.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+aspectRatio: import_v49.z.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
+});

 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
 var _a;
-const baseURL = (_a = (0,
+const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
 const getHeaders = () => ({
-"x-goog-api-key": (0,
+"x-goog-api-key": (0, import_provider_utils9.loadApiKey)({
 apiKey: options.apiKey,
 environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
 description: "Google Generative AI"
 }),
 ...options.headers
 });
-const createChatModel = (modelId
+const createChatModel = (modelId) => {
 var _a2;
-return new GoogleGenerativeAILanguageModel(modelId,
+return new GoogleGenerativeAILanguageModel(modelId, {
 provider: "google.generative-ai",
 baseURL,
 headers: getHeaders,
-generateId: (_a2 = options.generateId) != null ? _a2 :
-
+generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils9.generateId,
+supportedUrls: () => ({
+"*": [
+// Only allow requests to the Google Generative Language "files" endpoint
+// e.g. https://generativelanguage.googleapis.com/v1beta/files/...
+new RegExp(`^${baseURL}/files/.*$`)
+]
+}),
 fetch: options.fetch
 });
 };
-const createEmbeddingModel = (modelId
+const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
 provider: "google.generative-ai",
 baseURL,
 headers: getHeaders,
 fetch: options.fetch
 });
-const
+const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
+provider: "google.generative-ai",
+baseURL,
+headers: getHeaders,
+fetch: options.fetch
+});
+const provider = function(modelId) {
 if (new.target) {
 throw new Error(
 "The Google Generative AI model function cannot be called with the new keyword."
 );
 }
-return createChatModel(modelId
+return createChatModel(modelId);
 };
 provider.languageModel = createChatModel;
 provider.chat = createChatModel;
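The provider factory now wires an image model (`provider.image` / `provider.imageModel`, backed by the `:predict` endpoint above) and the `googleTools` bundle. A hedged sketch of image generation; the Imagen model id and the `experimental_generateImage` import are assumptions based on the current AI SDK API, not part of this diff:

```ts
import { google } from '@ai-sdk/google';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: google.image('imagen-3.0-generate-002'), // assumed model id
  prompt: 'A watercolor lighthouse at dusk',
  aspectRatio: '16:9',
  providerOptions: {
    google: {
      // validated by googleImageProviderOptionsSchema above
      personGeneration: 'dont_allow',
    },
  },
});
```

`google.tools.codeExecution({})` can be wired like `google_search` in the earlier sketch, under the tool key `code_execution`.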
@@ -918,9 +1430,9 @@ function createGoogleGenerativeAI(options = {}) {
 provider.embedding = createEmbeddingModel;
 provider.textEmbedding = createEmbeddingModel;
 provider.textEmbeddingModel = createEmbeddingModel;
-provider.
-
-
+provider.image = createImageModel;
+provider.imageModel = createImageModel;
+provider.tools = googleTools;
 return provider;
 }
 var google = createGoogleGenerativeAI();