@ai-sdk/google 2.0.0-canary.8 → 2.0.0
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +570 -0
- package/README.md +2 -2
- package/dist/index.d.mts +157 -267
- package/dist/index.d.ts +157 -267
- package/dist/index.js +757 -230
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +751 -220
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +47 -254
- package/dist/internal/index.d.ts +47 -254
- package/dist/internal/index.js +554 -201
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +541 -189
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +11 -9
package/dist/index.js
CHANGED
@@ -26,22 +26,21 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);
 
 // src/google-provider.ts
-var
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
 
 // src/google-generative-ai-embedding-model.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var
+var import_v43 = require("zod/v4");
 
 // src/google-error.ts
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var
-var googleErrorDataSchema =
-  error:
-    code:
-    message:
-    status:
+var import_v4 = require("zod/v4");
+var googleErrorDataSchema = import_v4.z.object({
+  error: import_v4.z.object({
+    code: import_v4.z.number().nullable(),
+    message: import_v4.z.string(),
+    status: import_v4.z.string()
   })
 });
 var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -49,28 +48,61 @@ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-embedding-options.ts
+var import_v42 = require("zod/v4");
+var googleGenerativeAIEmbeddingProviderOptions = import_v42.z.object({
+  /**
+   * Optional. Optional reduced dimension for the output embedding.
+   * If set, excessive values in the output embedding are truncated from the end.
+   */
+  outputDimensionality: import_v42.z.number().optional(),
+  /**
+   * Optional. Specifies the task type for generating embeddings.
+   * Supported task types:
+   * - SEMANTIC_SIMILARITY: Optimized for text similarity.
+   * - CLASSIFICATION: Optimized for text classification.
+   * - CLUSTERING: Optimized for clustering texts based on similarity.
+   * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+   * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+   * - QUESTION_ANSWERING: Optimized for answering questions.
+   * - FACT_VERIFICATION: Optimized for verifying factual information.
+   * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+   */
+  taskType: import_v42.z.enum([
+    "SEMANTIC_SIMILARITY",
+    "CLASSIFICATION",
+    "CLUSTERING",
+    "RETRIEVAL_DOCUMENT",
+    "RETRIEVAL_QUERY",
+    "QUESTION_ANSWERING",
+    "FACT_VERIFICATION",
+    "CODE_RETRIEVAL_QUERY"
+  ]).optional()
+});
+
 // src/google-generative-ai-embedding-model.ts
 var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId,
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
+      provider: "google",
+      providerOptions,
+      schema: googleGenerativeAIEmbeddingProviderOptions
+    });
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new import_provider.TooManyEmbeddingValuesForCallError({
         provider: this.provider,
@@ -83,6 +115,35 @@ var GoogleGenerativeAIEmbeddingModel = class {
       await (0, import_provider_utils2.resolve)(this.config.headers),
       headers
     );
+    if (values.length === 1) {
+      const {
+        responseHeaders: responseHeaders2,
+        value: response2,
+        rawValue: rawValue2
+      } = await (0, import_provider_utils2.postJsonToApi)({
+        url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
+        headers: mergedHeaders,
+        body: {
+          model: `models/${this.modelId}`,
+          content: {
+            parts: [{ text: values[0] }]
+          },
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
+        },
+        failedResponseHandler: googleFailedResponseHandler,
+        successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+          googleGenerativeAISingleEmbeddingResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        embeddings: [response2.embedding.values],
+        usage: void 0,
+        response: { headers: responseHeaders2, body: rawValue2 }
+      };
+    }
     const {
       responseHeaders,
       value: response,
@@ -94,7 +155,8 @@ var GoogleGenerativeAIEmbeddingModel = class {
         requests: values.map((value) => ({
           model: `models/${this.modelId}`,
           content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality:
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
         }))
       },
       failedResponseHandler: googleFailedResponseHandler,
@@ -111,13 +173,16 @@ var GoogleGenerativeAIEmbeddingModel = class {
     };
   }
 };
-var googleGenerativeAITextEmbeddingResponseSchema =
-  embeddings:
+var googleGenerativeAITextEmbeddingResponseSchema = import_v43.z.object({
+  embeddings: import_v43.z.array(import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) }))
+});
+var googleGenerativeAISingleEmbeddingResponseSchema = import_v43.z.object({
+  embedding: import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) })
 });
 
 // src/google-generative-ai-language-model.ts
-var
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var import_v47 = require("zod/v4");
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -213,16 +278,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
   return result;
 }
 function isEmptyObjectSchema(jsonSchema) {
-  return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+  return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
 }
 
 // src/convert-to-google-generative-ai-messages.ts
 var import_provider2 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-function convertToGoogleGenerativeAIMessages(prompt) {
+function convertToGoogleGenerativeAIMessages(prompt, options) {
+  var _a;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
+  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
@@ -296,7 +363,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
                 return {
                   functionCall: {
                     name: part.toolName,
-                    args: part.
+                    args: part.input
                   }
                 };
               }
@@ -314,7 +381,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               name: part.toolName,
               response: {
                 name: part.toolName,
-                content: part.
+                content: part.output.value
               }
             }
           }))
@@ -323,8 +390,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
       }
     }
   }
+  if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+    const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+    contents[0].parts.unshift({ text: systemText + "\n\n" });
+  }
   return {
-    systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+    systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
     contents
   };
 }
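With the Gemma handling above, a system prompt sent to a `gemma-*` model is folded into the first user message instead of being emitted as `systemInstruction`. A sketch of the calling side, assuming an illustrative Gemma model id:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// For Gemma models the provider prepends the system prompt to the first
// user message, since the API rejects systemInstruction for them.
const { text } = await generateText({
  model: google('gemma-3-27b-it'),
  system: 'You are a terse release-notes assistant.',
  prompt: 'Summarize this changelog entry in one sentence.',
});
```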
@@ -334,13 +405,73 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
+// src/google-generative-ai-options.ts
+var import_v44 = require("zod/v4");
+var googleGenerativeAIProviderOptions = import_v44.z.object({
+  responseModalities: import_v44.z.array(import_v44.z.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: import_v44.z.object({
+    thinkingBudget: import_v44.z.number().optional(),
+    includeThoughts: import_v44.z.boolean().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: import_v44.z.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: import_v44.z.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: import_v44.z.array(
+    import_v44.z.object({
+      category: import_v44.z.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: import_v44.z.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: import_v44.z.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: import_v44.z.boolean().optional()
+});
+
 // src/google-prepare-tools.ts
 var import_provider3 = require("@ai-sdk/provider");
 function prepareTools({
   tools,
   toolChoice,
-  useSearchGrounding,
-  dynamicRetrievalConfig,
   modelId
 }) {
   var _a;
@@ -348,28 +479,87 @@ function prepareTools({
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
-  if (
+  if (tools == null) {
+    return { tools: void 0, toolConfig: void 0, toolWarnings };
+  }
+  const hasFunctionTools = tools.some((tool) => tool.type === "function");
+  const hasProviderDefinedTools = tools.some(
+    (tool) => tool.type === "provider-defined"
+  );
+  if (hasFunctionTools && hasProviderDefinedTools) {
+    toolWarnings.push({
+      type: "unsupported-tool",
+      tool: tools.find((tool) => tool.type === "function"),
+      details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+    });
+  }
+  if (hasProviderDefinedTools) {
+    const googleTools2 = {};
+    const providerDefinedTools = tools.filter(
+      (tool) => tool.type === "provider-defined"
+    );
+    providerDefinedTools.forEach((tool) => {
+      switch (tool.id) {
+        case "google.google_search":
+          if (isGemini2) {
+            googleTools2.googleSearch = {};
+          } else if (supportsDynamicRetrieval) {
+            googleTools2.googleSearchRetrieval = {
+              dynamicRetrievalConfig: {
+                mode: tool.args.mode,
+                dynamicThreshold: tool.args.dynamicThreshold
+              }
+            };
+          } else {
+            googleTools2.googleSearchRetrieval = {};
+          }
+          break;
+        case "google.url_context":
+          if (isGemini2) {
+            googleTools2.urlContext = {};
+          } else {
+            toolWarnings.push({
+              type: "unsupported-tool",
+              tool,
+              details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+            });
+          }
+          break;
+        case "google.code_execution":
+          if (isGemini2) {
+            googleTools2.codeExecution = {};
+          } else {
+            toolWarnings.push({
+              type: "unsupported-tool",
+              tool,
+              details: "The code execution tools is not supported with other Gemini models than Gemini 2."
+            });
+          }
+          break;
+        default:
+          toolWarnings.push({ type: "unsupported-tool", tool });
+          break;
+      }
+    });
     return {
-      tools:
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
-      },
+      tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
       toolConfig: void 0,
       toolWarnings
     };
   }
-  if (tools == null) {
-    return { tools: void 0, toolConfig: void 0, toolWarnings };
-  }
   const functionDeclarations = [];
   for (const tool of tools) {
-
-
-
-
-
-
-
-
+    switch (tool.type) {
+      case "function":
+        functionDeclarations.push({
+          name: tool.name,
+          description: (_a = tool.description) != null ? _a : "",
+          parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+        });
+        break;
+      default:
+        toolWarnings.push({ type: "unsupported-tool", tool });
+        break;
     }
   }
   if (toolChoice == null) {
@@ -446,23 +636,80 @@ function mapGoogleGenerativeAIFinishReason({
   }
 }
 
+// src/tool/google-search.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_v45 = require("zod/v4");
+var groundingChunkSchema = import_v45.z.object({
+  web: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish(),
+  retrievedContext: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string() }).nullish()
+});
+var groundingMetadataSchema = import_v45.z.object({
+  webSearchQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+  retrievalQueries: import_v45.z.array(import_v45.z.string()).nullish(),
+  searchEntryPoint: import_v45.z.object({ renderedContent: import_v45.z.string() }).nullish(),
+  groundingChunks: import_v45.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_v45.z.array(
+    import_v45.z.object({
+      segment: import_v45.z.object({
+        startIndex: import_v45.z.number().nullish(),
+        endIndex: import_v45.z.number().nullish(),
+        text: import_v45.z.string().nullish()
+      }),
+      segment_text: import_v45.z.string().nullish(),
+      groundingChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+      supportChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
+      confidenceScores: import_v45.z.array(import_v45.z.number()).nullish(),
+      confidenceScore: import_v45.z.array(import_v45.z.number()).nullish()
+    })
+  ).nullish(),
+  retrievalMetadata: import_v45.z.union([
+    import_v45.z.object({
+      webDynamicRetrievalScore: import_v45.z.number()
+    }),
+    import_v45.z.object({})
+  ]).nullish()
+});
+var googleSearch = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+  id: "google.google_search",
+  name: "google_search",
+  inputSchema: import_v45.z.object({
+    mode: import_v45.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+    dynamicThreshold: import_v45.z.number().default(1)
+  })
+});
+
+// src/tool/url-context.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_v46 = require("zod/v4");
+var urlMetadataSchema = import_v46.z.object({
+  retrievedUrl: import_v46.z.string(),
+  urlRetrievalStatus: import_v46.z.string()
+});
+var urlContextMetadataSchema = import_v46.z.object({
+  urlMetadata: import_v46.z.array(urlMetadataSchema)
+});
+var urlContext = (0, import_provider_utils5.createProviderDefinedToolFactory)({
+  id: "google.url_context",
+  name: "url_context",
+  inputSchema: import_v46.z.object({})
+});
+
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
-
-    this.supportsImageUrls = false;
+    var _a;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
+    this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils6.generateId;
   }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
     prompt,
     maxOutputTokens,
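Search grounding and URL context are no longer constructor settings (`useSearchGrounding`, `dynamicRetrievalConfig`); they are provider-defined tools built by the factories above and passed through the normal `tools` map, keyed by the names the factories require. A sketch under those assumptions, with an illustrative model id and prompt:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// Provider-defined tools replace the old search-grounding settings.
// The keys must match the tool names ("google_search", "url_context").
const { text, sources } = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'What changed in @ai-sdk/google 2.0.0? Check the linked docs.',
  tools: {
    google_search: google.tools.googleSearch({}),
    url_context: google.tools.urlContext({}),
  },
});
```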
@@ -478,23 +725,31 @@ var GoogleGenerativeAILanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a;
+    var _a, _b;
     const warnings = [];
-    const googleOptions = (0,
+    const googleOptions = await (0, import_provider_utils6.parseProviderOptions)({
       provider: "google",
       providerOptions,
-      schema:
+      schema: googleGenerativeAIProviderOptions
     });
-
+    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+      warnings.push({
+        type: "other",
+        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+      });
+    }
+    const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+      prompt,
+      { isGemmaModel }
+    );
     const {
-      tools:
+      tools: googleTools2,
       toolConfig: googleToolConfig,
       toolWarnings
     } = prepareTools({
       tools,
       toolChoice,
-      useSearchGrounding: (_a = this.settings.useSearchGrounding) != null ? _a : false,
-      dynamicRetrievalConfig: this.settings.dynamicRetrievalConfig,
       modelId: this.modelId
     });
     return {
@@ -513,83 +768,127 @@ var GoogleGenerativeAILanguageModel = class {
           responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
           responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
           // so this is needed as an escape hatch:
-
-
-
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
           // provider options:
-          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
         },
         contents,
-        systemInstruction,
-        safetySettings:
-        tools:
+        systemInstruction: isGemmaModel ? void 0 : systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools2,
        toolConfig: googleToolConfig,
-        cachedContent:
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
       },
       warnings: [...warnings, ...toolWarnings]
     };
   }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
-  }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const mergedHeaders = (0,
-      await (0,
+    const mergedHeaders = (0, import_provider_utils6.combineHeaders)(
+      await (0, import_provider_utils6.resolve)(this.config.headers),
       options.headers
     );
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils6.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
     const candidate = response.candidates[0];
-    const
-    const
-      parts,
-      generateId: this.config.generateId
-    });
+    const content = [];
+    const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
     const usageMetadata = response.usageMetadata;
+    let lastCodeExecutionToolCallId;
+    for (const part of parts) {
+      if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
+        const toolCallId = this.config.generateId();
+        lastCodeExecutionToolCallId = toolCallId;
+        content.push({
+          type: "tool-call",
+          toolCallId,
+          toolName: "code_execution",
+          input: JSON.stringify(part.executableCode),
+          providerExecuted: true
+        });
+      } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+        content.push({
+          type: "tool-result",
+          // Assumes a result directly follows its corresponding call part.
+          toolCallId: lastCodeExecutionToolCallId,
+          toolName: "code_execution",
+          result: {
+            outcome: part.codeExecutionResult.outcome,
+            output: part.codeExecutionResult.output
+          },
+          providerExecuted: true
+        });
+        lastCodeExecutionToolCallId = void 0;
+      } else if ("text" in part && part.text != null && part.text.length > 0) {
+        if (part.thought === true) {
+          content.push({ type: "reasoning", text: part.text });
+        } else {
+          content.push({ type: "text", text: part.text });
+        }
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          input: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_d = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
+      generateId: this.config.generateId
+    })) != null ? _d : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     return {
-
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        type: "file",
-        data: part.inlineData.data,
-        mediaType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls:
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _e : void 0,
+        outputTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _f : void 0,
+        totalTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _g : void 0,
+        reasoningTokens: (_h = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _h : void 0,
+        cachedInputTokens: (_i = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _i : void 0
      },
      warnings,
      providerMetadata: {
        google: {
-          groundingMetadata: (
-
+          groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+          urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+          safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+          usageMetadata: usageMetadata != null ? usageMetadata : null
        }
      },
-      sources: extractSources({
-        groundingMetadata: candidate.groundingMetadata,
-        generateId: this.config.generateId
-      }),
      request: { body },
      response: {
        // TODO timestamp, model id, id
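`doGenerate` now returns a single ordered `content` array (text, reasoning, tool calls, files, sources) instead of separate `text`/`files`/`toolCalls`/`sources` fields, and the usage and provider metadata carry the new fields shown above. Reading them from a call result might look like this (a sketch; field access mirrors the diff, the model id is illustrative):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'Hello!',
});

// usage now includes totalTokens, reasoningTokens and cachedInputTokens.
console.log(result.usage);

// provider metadata gained urlContextMetadata, safetyRatings and usageMetadata.
const meta = result.providerMetadata?.google;
console.log(meta?.groundingMetadata, meta?.urlContextMetadata, meta?.safetyRatings);
```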
@@ -601,34 +900,46 @@ var GoogleGenerativeAILanguageModel = class {
   async doStream(options) {
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const headers = (0,
-      await (0,
+    const headers = (0, import_provider_utils6.combineHeaders)(
+      await (0, import_provider_utils6.resolve)(this.config.headers),
       options.headers
     );
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
       url: `${this.config.baseURL}/${getModelPath(
         this.modelId
       )}:streamGenerateContent?alt=sse`,
       headers,
       body: args,
       failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(chunkSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
-    const
+    const generateId3 = this.config.generateId;
     let hasToolCalls = false;
+    let currentTextBlockId = null;
+    let currentReasoningBlockId = null;
+    let blockCounter = 0;
+    const emittedSourceUrls = /* @__PURE__ */ new Set();
+    let lastCodeExecutionToolCallId;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -638,16 +949,99 @@ var GoogleGenerativeAILanguageModel = class {
             if (usageMetadata != null) {
               usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
               usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
             const content = candidate.content;
+            const sources = extractSources({
+              groundingMetadata: candidate.groundingMetadata,
+              generateId: generateId3
+            });
+            if (sources != null) {
+              for (const source of sources) {
+                if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+                  emittedSourceUrls.add(source.url);
+                  controller.enqueue(source);
+                }
+              }
+            }
             if (content != null) {
-              const
-
-
+              const parts = (_g = content.parts) != null ? _g : [];
+              for (const part of parts) {
+                if ("executableCode" in part && ((_h = part.executableCode) == null ? void 0 : _h.code)) {
+                  const toolCallId = generateId3();
+                  lastCodeExecutionToolCallId = toolCallId;
+                  controller.enqueue({
+                    type: "tool-call",
+                    toolCallId,
+                    toolName: "code_execution",
+                    input: JSON.stringify(part.executableCode),
+                    providerExecuted: true
+                  });
+                  hasToolCalls = true;
+                } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+                  const toolCallId = lastCodeExecutionToolCallId;
+                  if (toolCallId) {
+                    controller.enqueue({
+                      type: "tool-result",
+                      toolCallId,
+                      toolName: "code_execution",
+                      result: {
+                        outcome: part.codeExecutionResult.outcome,
+                        output: part.codeExecutionResult.output
+                      },
+                      providerExecuted: true
+                    });
+                    lastCodeExecutionToolCallId = void 0;
+                  }
+                } else if ("text" in part && part.text != null && part.text.length > 0) {
+                  if (part.thought === true) {
+                    if (currentTextBlockId !== null) {
+                      controller.enqueue({
+                        type: "text-end",
+                        id: currentTextBlockId
+                      });
+                      currentTextBlockId = null;
+                    }
+                    if (currentReasoningBlockId === null) {
+                      currentReasoningBlockId = String(blockCounter++);
+                      controller.enqueue({
+                        type: "reasoning-start",
+                        id: currentReasoningBlockId
+                      });
+                    }
+                    controller.enqueue({
+                      type: "reasoning-delta",
+                      id: currentReasoningBlockId,
+                      delta: part.text
+                    });
+                  } else {
+                    if (currentReasoningBlockId !== null) {
+                      controller.enqueue({
+                        type: "reasoning-end",
+                        id: currentReasoningBlockId
+                      });
+                      currentReasoningBlockId = null;
+                    }
+                    if (currentTextBlockId === null) {
+                      currentTextBlockId = String(blockCounter++);
+                      controller.enqueue({
+                        type: "text-start",
+                        id: currentTextBlockId
+                      });
+                    }
+                    controller.enqueue({
+                      type: "text-delta",
+                      id: currentTextBlockId,
+                      delta: part.text
+                    });
+                  }
+                }
+              }
             }
              const inlineDataParts = getInlineDataParts(content.parts);
              if (inlineDataParts != null) {
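Streaming now emits explicit block boundaries (`stream-start`, `text-start`/`-delta`/`-end`, `reasoning-start`/`-delta`/`-end`, `raw`) rather than bare text deltas, with thought parts kept in their own reasoning blocks. Consuming the stream through the AI SDK is unchanged at the surface; a sketch with an illustrative model id and thinking budget:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

// Thought parts arrive as separate reasoning blocks; plain text still
// flows through textStream as before.
const result = streamText({
  model: google('gemini-2.5-flash'),
  prompt: 'Think step by step: what is 17 * 23?',
  providerOptions: {
    google: { thinkingConfig: { thinkingBudget: 1024 } },
  },
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```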
@@ -661,23 +1055,29 @@ var GoogleGenerativeAILanguageModel = class {
              }
              const toolCallDeltas = getToolCallsFromParts({
                parts: content.parts,
-                generateId:
+                generateId: generateId3
              });
              if (toolCallDeltas != null) {
                for (const toolCall of toolCallDeltas) {
                  controller.enqueue({
-                    type: "tool-
-
-
-
-
+                    type: "tool-input-start",
+                    id: toolCall.toolCallId,
+                    toolName: toolCall.toolName
+                  });
+                  controller.enqueue({
+                    type: "tool-input-delta",
+                    id: toolCall.toolCallId,
+                    delta: toolCall.args
+                  });
+                  controller.enqueue({
+                    type: "tool-input-end",
+                    id: toolCall.toolCallId
                  });
                  controller.enqueue({
                    type: "tool-call",
-                    toolCallType: "function",
                    toolCallId: toolCall.toolCallId,
                    toolName: toolCall.toolName,
-
+                    input: toolCall.args
                  });
                  hasToolCalls = true;
                }
@@ -688,22 +1088,31 @@ var GoogleGenerativeAILanguageModel = class {
                finishReason: candidate.finishReason,
                hasToolCalls
              });
-              const sources = (_d = extractSources({
-                groundingMetadata: candidate.groundingMetadata,
-                generateId: generateId2
-              })) != null ? _d : [];
-              for (const source of sources) {
-                controller.enqueue(source);
-              }
              providerMetadata = {
                google: {
-                  groundingMetadata: (
-
+                  groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+                  urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
+                  safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
                }
              };
+              if (usageMetadata != null) {
+                providerMetadata.google.usageMetadata = usageMetadata;
+              }
            }
          },
          flush(controller) {
+            if (currentTextBlockId !== null) {
+              controller.enqueue({
+                type: "text-end",
+                id: currentTextBlockId
+              });
+            }
+            if (currentReasoningBlockId !== null) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: currentReasoningBlockId
+              });
+            }
            controller.enqueue({
              type: "finish",
              finishReason,
@@ -714,33 +1123,24 @@ var GoogleGenerativeAILanguageModel = class {
         })
       ),
       response: { headers: responseHeaders },
-      warnings,
       request: { body }
     };
   }
 };
 function getToolCallsFromParts({
   parts,
-  generateId:
+  generateId: generateId3
 }) {
   const functionCallParts = parts == null ? void 0 : parts.filter(
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
     type: "tool-call",
-
-    toolCallId: generateId2(),
+    toolCallId: generateId3(),
     toolName: part.functionCall.name,
     args: JSON.stringify(part.functionCall.args)
   }));
 }
-function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-  return textParts == null || textParts.length === 0 ? void 0 : {
-    type: "text",
-    text: textParts.map((part) => part.text).join("")
-  };
-}
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
     (part) => "inlineData" in part
@@ -748,7 +1148,7 @@ function getInlineDataParts(parts) {
 }
 function extractSources({
   groundingMetadata,
-  generateId:
+  generateId: generateId3
 }) {
   var _a;
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -756,146 +1156,273 @@ function extractSources({
   ).map((chunk) => ({
     type: "source",
     sourceType: "url",
-    id:
+    id: generateId3(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema =
-
-
-
-
-
-
-
-        functionCall: import_zod3.z.object({
-          name: import_zod3.z.string(),
-          args: import_zod3.z.unknown()
+var contentSchema = import_v47.z.object({
+  parts: import_v47.z.array(
+    import_v47.z.union([
+      // note: order matters since text can be fully empty
+      import_v47.z.object({
+        functionCall: import_v47.z.object({
+          name: import_v47.z.string(),
+          args: import_v47.z.unknown()
         })
       }),
-
-        inlineData:
-          mimeType:
-          data:
+      import_v47.z.object({
+        inlineData: import_v47.z.object({
+          mimeType: import_v47.z.string(),
+          data: import_v47.z.string()
        })
+      }),
+      import_v47.z.object({
+        executableCode: import_v47.z.object({
+          language: import_v47.z.string(),
+          code: import_v47.z.string()
+        }).nullish(),
+        codeExecutionResult: import_v47.z.object({
+          outcome: import_v47.z.string(),
+          output: import_v47.z.string()
+        }).nullish(),
+        text: import_v47.z.string().nullish(),
+        thought: import_v47.z.boolean().nullish()
      })
    ])
  ).nullish()
 });
-var
-
-
-
-
-
-
-  searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
-  groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
-  groundingSupports: import_zod3.z.array(
-    import_zod3.z.object({
-      segment: import_zod3.z.object({
-        startIndex: import_zod3.z.number().nullish(),
-        endIndex: import_zod3.z.number().nullish(),
-        text: import_zod3.z.string().nullish()
-      }),
-      segment_text: import_zod3.z.string().nullish(),
-      groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
-      supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
-      confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
-      confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
-    })
-  ).nullish(),
-  retrievalMetadata: import_zod3.z.union([
-    import_zod3.z.object({
-      webDynamicRetrievalScore: import_zod3.z.number()
-    }),
-    import_zod3.z.object({})
-  ]).nullish()
+var safetyRatingSchema = import_v47.z.object({
+  category: import_v47.z.string().nullish(),
+  probability: import_v47.z.string().nullish(),
+  probabilityScore: import_v47.z.number().nullish(),
+  severity: import_v47.z.string().nullish(),
+  severityScore: import_v47.z.number().nullish(),
+  blocked: import_v47.z.boolean().nullish()
 });
-var
-
-
-
-
-
-  blocked: import_zod3.z.boolean().nullish()
+var usageSchema = import_v47.z.object({
+  cachedContentTokenCount: import_v47.z.number().nullish(),
+  thoughtsTokenCount: import_v47.z.number().nullish(),
+  promptTokenCount: import_v47.z.number().nullish(),
+  candidatesTokenCount: import_v47.z.number().nullish(),
+  totalTokenCount: import_v47.z.number().nullish()
 });
-var responseSchema =
-  candidates:
-
-      content: contentSchema.nullish().or(
-      finishReason:
-      safetyRatings:
-      groundingMetadata: groundingMetadataSchema.nullish()
+var responseSchema = import_v47.z.object({
+  candidates: import_v47.z.array(
+    import_v47.z.object({
+      content: contentSchema.nullish().or(import_v47.z.object({}).strict()),
+      finishReason: import_v47.z.string().nullish(),
+      safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+      groundingMetadata: groundingMetadataSchema.nullish(),
+      urlContextMetadata: urlContextMetadataSchema.nullish()
    })
  ),
-  usageMetadata:
-    promptTokenCount: import_zod3.z.number().nullish(),
-    candidatesTokenCount: import_zod3.z.number().nullish(),
-    totalTokenCount: import_zod3.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = import_v47.z.object({
+  candidates: import_v47.z.array(
+    import_v47.z.object({
      content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
-      groundingMetadata: groundingMetadataSchema.nullish()
+      finishReason: import_v47.z.string().nullish(),
+      safetyRatings: import_v47.z.array(safetyRatingSchema).nullish(),
+      groundingMetadata: groundingMetadataSchema.nullish(),
+      urlContextMetadata: urlContextMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata:
-    promptTokenCount: import_zod3.z.number().nullish(),
-    candidatesTokenCount: import_zod3.z.number().nullish(),
-    totalTokenCount: import_zod3.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-
-
+
+// src/tool/code-execution.ts
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_v48 = require("zod/v4");
+var codeExecution = (0, import_provider_utils7.createProviderDefinedToolFactoryWithOutputSchema)({
+  id: "google.code_execution",
+  name: "code_execution",
+  inputSchema: import_v48.z.object({
+    language: import_v48.z.string().describe("The programming language of the code."),
+    code: import_v48.z.string().describe("The code to be executed.")
+  }),
+  outputSchema: import_v48.z.object({
+    outcome: import_v48.z.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
+    output: import_v48.z.string().describe("The output from the code execution.")
+  })
 });
 
-// src/google-
-
-
-
+// src/google-tools.ts
+var googleTools = {
+  /**
+   * Creates a Google search tool that gives Google direct access to real-time web content.
+   * Must have name "google_search".
+   */
+  googleSearch,
+  /**
+   * Creates a URL context tool that gives Google direct access to real-time web content.
+   * Must have name "url_context".
+   */
+  urlContext,
+  /**
+   * A tool that enables the model to generate and run Python code.
+   * Must have name "code_execution".
+   *
+   * @note Ensure the selected model supports Code Execution.
+   * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
+   *
+   * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
+   * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
+   */
+  codeExecution
+};
+
+// src/google-generative-ai-image-model.ts
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+var import_v49 = require("zod/v4");
+var GoogleGenerativeAIImageModel = class {
+  constructor(modelId, settings, config) {
+    this.modelId = modelId;
+    this.settings = settings;
+    this.config = config;
+    this.specificationVersion = "v2";
+  }
+  get maxImagesPerCall() {
+    var _a;
+    return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async doGenerate(options) {
+    var _a, _b, _c;
+    const {
+      prompt,
+      n = 1,
+      size = "1024x1024",
+      aspectRatio = "1:1",
+      seed,
+      providerOptions,
+      headers,
+      abortSignal
+    } = options;
+    const warnings = [];
+    if (size != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "size",
+        details: "This model does not support the `size` option. Use `aspectRatio` instead."
+      });
+    }
+    if (seed != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "seed",
+        details: "This model does not support the `seed` option through this provider."
+      });
+    }
+    const googleOptions = await (0, import_provider_utils8.parseProviderOptions)({
+      provider: "google",
+      providerOptions,
+      schema: googleImageProviderOptionsSchema
+    });
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const parameters = {
+      sampleCount: n
+    };
+    if (aspectRatio != null) {
+      parameters.aspectRatio = aspectRatio;
+    }
+    if (googleOptions) {
+      Object.assign(parameters, googleOptions);
+    }
+    const body = {
+      instances: [{ prompt }],
+      parameters
+    };
+    const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+      url: `${this.config.baseURL}/models/${this.modelId}:predict`,
+      headers: (0, import_provider_utils8.combineHeaders)(await (0, import_provider_utils8.resolve)(this.config.headers), headers),
+      body,
+      failedResponseHandler: googleFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
+        googleImageResponseSchema
+      ),
+      abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      images: response.predictions.map(
+        (p) => p.bytesBase64Encoded
+      ),
+      warnings: warnings != null ? warnings : [],
+      providerMetadata: {
+        google: {
+          images: response.predictions.map((prediction) => ({
+            // Add any prediction-specific metadata here
+          }))
+        }
+      },
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders
+      }
+    };
+  }
+};
+var googleImageResponseSchema = import_v49.z.object({
+  predictions: import_v49.z.array(import_v49.z.object({ bytesBase64Encoded: import_v49.z.string() })).default([])
+});
+var googleImageProviderOptionsSchema = import_v49.z.object({
+  personGeneration: import_v49.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+  aspectRatio: import_v49.z.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
+});
 
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
   var _a;
-  const baseURL = (_a = (0,
+  const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
   const getHeaders = () => ({
-    "x-goog-api-key": (0,
+    "x-goog-api-key": (0, import_provider_utils9.loadApiKey)({
       apiKey: options.apiKey,
       environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
       description: "Google Generative AI"
     }),
     ...options.headers
   });
-  const createChatModel = (modelId
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId,
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
-      generateId: (_a2 = options.generateId) != null ? _a2 :
-
+      generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils9.generateId,
+      supportedUrls: () => ({
+        "*": [
+          // Only allow requests to the Google Generative Language "files" endpoint
+          // e.g. https://generativelanguage.googleapis.com/v1beta/files/...
+          new RegExp(`^${baseURL}/files/.*$`)
+        ]
+      }),
      fetch: options.fetch
    });
  };
-  const createEmbeddingModel = (modelId
+  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
    provider: "google.generative-ai",
    baseURL,
    headers: getHeaders,
    fetch: options.fetch
  });
-  const
+  const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
+    provider: "google.generative-ai",
+    baseURL,
+    headers: getHeaders,
+    fetch: options.fetch
+  });
+  const provider = function(modelId) {
    if (new.target) {
      throw new Error(
        "The Google Generative AI model function cannot be called with the new keyword."
      );
    }
-    return createChatModel(modelId
+    return createChatModel(modelId);
  };
  provider.languageModel = createChatModel;
  provider.chat = createChatModel;
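Beyond the language model, 2.0.0 wires up an image model factory (`provider.image` / `provider.imageModel`) and exposes the provider-defined tools as `provider.tools`, including the new provider-executed code execution tool. A sketch of both, with illustrative model ids:

```ts
import { google } from '@ai-sdk/google';
import { experimental_generateImage as generateImage, generateText } from 'ai';

// Imagen-style image generation through the new image model factory.
const { image } = await generateImage({
  model: google.image('imagen-3.0-generate-002'),
  prompt: 'A watercolor robot reading a changelog',
  aspectRatio: '1:1',
});

// Provider-executed code execution, exposed via provider.tools.
const { text } = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'Use Python to compute the 20th Fibonacci number.',
  tools: { code_execution: google.tools.codeExecution({}) },
});
```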
@@ -903,9 +1430,9 @@ function createGoogleGenerativeAI(options = {}) {
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
-  provider.
-
-
+  provider.image = createImageModel;
+  provider.imageModel = createImageModel;
+  provider.tools = googleTools;
   return provider;
 }
 var google = createGoogleGenerativeAI();