@ai-sdk/google 2.0.0-canary.9 → 2.0.0
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +559 -0
- package/README.md +2 -2
- package/dist/index.d.mts +157 -267
- package/dist/index.d.ts +157 -267
- package/dist/index.js +732 -220
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +726 -210
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +47 -254
- package/dist/internal/index.d.ts +47 -254
- package/dist/internal/index.js +529 -191
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +516 -179
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +11 -9
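Most of the churn in `dist/index.*` and `dist/internal/index.*` comes from two changes visible in the diff below: the per-model `settings` object (`useSearchGrounding`, `dynamicRetrievalConfig`, `structuredOutputs`) is replaced by `providerOptions` validated with `googleGenerativeAIProviderOptions`, and search grounding, URL context, and code execution become provider-defined tools exported as `googleTools`. The sketch below shows how a consumer might call the 2.0.0 surface; it assumes the public entry point re-exports the tool factories as `google.tools` and that `generateText` from the `ai` package is the caller, so treat any name not present in this diff as an assumption rather than confirmed API.

// Usage sketch only (not part of the published diff): assumes `google.tools` re-exports
// the googleTools object shown below and that `generateText` comes from the `ai` package.
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'), // the provider-defined tools below require a Gemini 2 model
  tools: {
    // replaces the removed `useSearchGrounding` / `dynamicRetrievalConfig` settings
    google_search: google.tools.googleSearch({}),
  },
  providerOptions: {
    google: {
      // parsed by `googleGenerativeAIProviderOptions` in the diff below
      thinkingConfig: { thinkingBudget: 1024 },
      safetySettings: [
        { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH' },
      ],
    },
  },
  prompt: 'What changed in Gemini recently?',
});

console.log(result.text);
console.log(result.providerMetadata?.google?.groundingMetadata ?? null);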
package/dist/internal/index.js
CHANGED
@@ -21,14 +21,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 var internal_exports = {};
 __export(internal_exports, {
   GoogleGenerativeAILanguageModel: () => GoogleGenerativeAILanguageModel,
-  …
+  googleTools: () => googleTools,
   safetyRatingSchema: () => safetyRatingSchema
 });
 module.exports = __toCommonJS(internal_exports);

 // src/google-generative-ai-language-model.ts
-var …
-var …
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_v45 = require("zod/v4");

 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -124,16 +124,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
   return result;
 }
 function isEmptyObjectSchema(jsonSchema) {
-  return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+  return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
 }

 // src/convert-to-google-generative-ai-messages.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
-function convertToGoogleGenerativeAIMessages(prompt) {
+function convertToGoogleGenerativeAIMessages(prompt, options) {
+  var _a;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
+  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
@@ -207,7 +209,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           return {
             functionCall: {
               name: part.toolName,
-              args: part.…
+              args: part.input
             }
           };
         }
@@ -225,7 +227,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           name: part.toolName,
           response: {
             name: part.toolName,
-            content: part.…
+            content: part.output.value
           }
         }
       }))
@@ -234,8 +236,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
       }
     }
   }
+  if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+    const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+    contents[0].parts.unshift({ text: systemText + "\n\n" });
+  }
   return {
-    systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+    systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
     contents
   };
 }
@@ -247,12 +253,12 @@ function getModelPath(modelId) {

 // src/google-error.ts
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var …
-var googleErrorDataSchema = …
-  error: …
-    code: …
-    message: …
-    status: …
+var import_v4 = require("zod/v4");
+var googleErrorDataSchema = import_v4.z.object({
+  error: import_v4.z.object({
+    code: import_v4.z.number().nullable(),
+    message: import_v4.z.string(),
+    status: import_v4.z.string()
   })
 });
 var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -260,13 +266,73 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
   errorToMessage: (data) => data.error.message
 });

+// src/google-generative-ai-options.ts
+var import_v42 = require("zod/v4");
+var googleGenerativeAIProviderOptions = import_v42.z.object({
+  responseModalities: import_v42.z.array(import_v42.z.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: import_v42.z.object({
+    thinkingBudget: import_v42.z.number().optional(),
+    includeThoughts: import_v42.z.boolean().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: import_v42.z.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: import_v42.z.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: import_v42.z.array(
+    import_v42.z.object({
+      category: import_v42.z.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: import_v42.z.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: import_v42.z.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: import_v42.z.boolean().optional()
+});
+
 // src/google-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
 function prepareTools({
   tools,
   toolChoice,
-  useSearchGrounding,
-  dynamicRetrievalConfig,
   modelId
 }) {
   var _a;
@@ -274,28 +340,87 @@ function prepareTools({
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
-  if (…
+  if (tools == null) {
+    return { tools: void 0, toolConfig: void 0, toolWarnings };
+  }
+  const hasFunctionTools = tools.some((tool) => tool.type === "function");
+  const hasProviderDefinedTools = tools.some(
+    (tool) => tool.type === "provider-defined"
+  );
+  if (hasFunctionTools && hasProviderDefinedTools) {
+    toolWarnings.push({
+      type: "unsupported-tool",
+      tool: tools.find((tool) => tool.type === "function"),
+      details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+    });
+  }
+  if (hasProviderDefinedTools) {
+    const googleTools2 = {};
+    const providerDefinedTools = tools.filter(
+      (tool) => tool.type === "provider-defined"
+    );
+    providerDefinedTools.forEach((tool) => {
+      switch (tool.id) {
+        case "google.google_search":
+          if (isGemini2) {
+            googleTools2.googleSearch = {};
+          } else if (supportsDynamicRetrieval) {
+            googleTools2.googleSearchRetrieval = {
+              dynamicRetrievalConfig: {
+                mode: tool.args.mode,
+                dynamicThreshold: tool.args.dynamicThreshold
+              }
+            };
+          } else {
+            googleTools2.googleSearchRetrieval = {};
+          }
+          break;
+        case "google.url_context":
+          if (isGemini2) {
+            googleTools2.urlContext = {};
+          } else {
+            toolWarnings.push({
+              type: "unsupported-tool",
+              tool,
+              details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+            });
+          }
+          break;
+        case "google.code_execution":
+          if (isGemini2) {
+            googleTools2.codeExecution = {};
+          } else {
+            toolWarnings.push({
+              type: "unsupported-tool",
+              tool,
+              details: "The code execution tools is not supported with other Gemini models than Gemini 2."
+            });
+          }
+          break;
+        default:
+          toolWarnings.push({ type: "unsupported-tool", tool });
+          break;
+      }
+    });
     return {
-      tools: …
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
-      },
+      tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
       toolConfig: void 0,
       toolWarnings
     };
   }
-  if (tools == null) {
-    return { tools: void 0, toolConfig: void 0, toolWarnings };
-  }
   const functionDeclarations = [];
   for (const tool of tools) {
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    switch (tool.type) {
+      case "function":
+        functionDeclarations.push({
+          name: tool.name,
+          description: (_a = tool.description) != null ? _a : "",
+          parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+        });
+        break;
+      default:
+        toolWarnings.push({ type: "unsupported-tool", tool });
+        break;
     }
   }
   if (toolChoice == null) {
@@ -372,23 +497,80 @@ function mapGoogleGenerativeAIFinishReason({
   }
 }

+// src/tool/google-search.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var import_v43 = require("zod/v4");
+var groundingChunkSchema = import_v43.z.object({
+  web: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish(),
+  retrievedContext: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish()
+});
+var groundingMetadataSchema = import_v43.z.object({
+  webSearchQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+  retrievalQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+  searchEntryPoint: import_v43.z.object({ renderedContent: import_v43.z.string() }).nullish(),
+  groundingChunks: import_v43.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_v43.z.array(
+    import_v43.z.object({
+      segment: import_v43.z.object({
+        startIndex: import_v43.z.number().nullish(),
+        endIndex: import_v43.z.number().nullish(),
+        text: import_v43.z.string().nullish()
+      }),
+      segment_text: import_v43.z.string().nullish(),
+      groundingChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+      supportChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+      confidenceScores: import_v43.z.array(import_v43.z.number()).nullish(),
+      confidenceScore: import_v43.z.array(import_v43.z.number()).nullish()
+    })
+  ).nullish(),
+  retrievalMetadata: import_v43.z.union([
+    import_v43.z.object({
+      webDynamicRetrievalScore: import_v43.z.number()
+    }),
+    import_v43.z.object({})
+  ]).nullish()
+});
+var googleSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+  id: "google.google_search",
+  name: "google_search",
+  inputSchema: import_v43.z.object({
+    mode: import_v43.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+    dynamicThreshold: import_v43.z.number().default(1)
+  })
+});
+
+// src/tool/url-context.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_v44 = require("zod/v4");
+var urlMetadataSchema = import_v44.z.object({
+  retrievedUrl: import_v44.z.string(),
+  urlRetrievalStatus: import_v44.z.string()
+});
+var urlContextMetadataSchema = import_v44.z.object({
+  urlMetadata: import_v44.z.array(urlMetadataSchema)
+});
+var urlContext = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+  id: "google.url_context",
+  name: "url_context",
+  inputSchema: import_v44.z.object({})
+});
+
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId, …
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
-    …
-    this.supportsImageUrls = false;
+    var _a;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-  …
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
+    this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils5.generateId;
   }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -404,23 +586,31 @@ var GoogleGenerativeAILanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a;
+    var _a, _b;
     const warnings = [];
-    const googleOptions = (0, …
+    const googleOptions = await (0, import_provider_utils5.parseProviderOptions)({
      provider: "google",
      providerOptions,
-      schema: …
+      schema: googleGenerativeAIProviderOptions
    });
-    …
+    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+      warnings.push({
+        type: "other",
+        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+      });
+    }
+    const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+      prompt,
+      { isGemmaModel }
+    );
     const {
-      tools: …
+      tools: googleTools2,
      toolConfig: googleToolConfig,
      toolWarnings
    } = prepareTools({
      tools,
      toolChoice,
-      useSearchGrounding: (_a = this.settings.useSearchGrounding) != null ? _a : false,
-      dynamicRetrievalConfig: this.settings.dynamicRetrievalConfig,
      modelId: this.modelId
    });
    return {
@@ -439,62 +629,89 @@ var GoogleGenerativeAILanguageModel = class {
          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
          // so this is needed as an escape hatch:
-          …
-          …
-          …
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
          },
          // provider options:
-          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
        },
        contents,
-        systemInstruction,
-        safetySettings: …
-        tools: …
+        systemInstruction: isGemmaModel ? void 0 : systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools2,
        toolConfig: googleToolConfig,
-        cachedContent: …
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
-  }
  async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);
-    const mergedHeaders = (0, …
-      await (0, …
+    const mergedHeaders = (0, import_provider_utils5.combineHeaders)(
+      await (0, import_provider_utils5.resolve)(this.config.headers),
      options.headers
    );
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
-    } = await (0, …
+    } = await (0, import_provider_utils5.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0, …
+      successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const candidate = response.candidates[0];
    const content = [];
-    const parts = …
+    const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
+    const usageMetadata = response.usageMetadata;
+    let lastCodeExecutionToolCallId;
    for (const part of parts) {
-      if ("…
-      …
+      if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
+        const toolCallId = this.config.generateId();
+        lastCodeExecutionToolCallId = toolCallId;
+        content.push({
+          type: "tool-call",
+          toolCallId,
+          toolName: "code_execution",
+          input: JSON.stringify(part.executableCode),
+          providerExecuted: true
+        });
+      } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+        content.push({
+          type: "tool-result",
+          // Assumes a result directly follows its corresponding call part.
+          toolCallId: lastCodeExecutionToolCallId,
+          toolName: "code_execution",
+          result: {
+            outcome: part.codeExecutionResult.outcome,
+            output: part.codeExecutionResult.output
+          },
+          providerExecuted: true
+        });
+        lastCodeExecutionToolCallId = void 0;
+      } else if ("text" in part && part.text != null && part.text.length > 0) {
+        if (part.thought === true) {
+          content.push({ type: "reasoning", text: part.text });
+        } else {
+          content.push({ type: "text", text: part.text });
+        }
      } else if ("functionCall" in part) {
        content.push({
          type: "tool-call",
-          toolCallType: "function",
          toolCallId: this.config.generateId(),
          toolName: part.functionCall.name,
-          …
+          input: JSON.stringify(part.functionCall.args)
        });
      } else if ("inlineData" in part) {
        content.push({
@@ -504,14 +721,13 @@ var GoogleGenerativeAILanguageModel = class {
        });
      }
    }
-    const sources = (…
+    const sources = (_d = extractSources({
      groundingMetadata: candidate.groundingMetadata,
      generateId: this.config.generateId
-    })) != null ? …
+    })) != null ? _d : [];
    for (const source of sources) {
      content.push(source);
    }
-    const usageMetadata = response.usageMetadata;
    return {
      content,
      finishReason: mapGoogleGenerativeAIFinishReason({
@@ -519,14 +735,19 @@ var GoogleGenerativeAILanguageModel = class {
        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      usage: {
-        inputTokens: (…
-        outputTokens: (…
+        inputTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _e : void 0,
+        outputTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _f : void 0,
+        totalTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _g : void 0,
+        reasoningTokens: (_h = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _h : void 0,
+        cachedInputTokens: (_i = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _i : void 0
      },
      warnings,
      providerMetadata: {
        google: {
-          groundingMetadata: (…
-          …
+          groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+          urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+          safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+          usageMetadata: usageMetadata != null ? usageMetadata : null
        }
      },
      request: { body },
@@ -540,29 +761,35 @@ var GoogleGenerativeAILanguageModel = class {
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);
-    const headers = (0, …
-      await (0, …
+    const headers = (0, import_provider_utils5.combineHeaders)(
+      await (0, import_provider_utils5.resolve)(this.config.headers),
      options.headers
    );
-    const { responseHeaders, value: response } = await (0, …
+    const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0, …
+      successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
    };
    let providerMetadata = void 0;
-    const …
+    const generateId2 = this.config.generateId;
    let hasToolCalls = false;
+    let currentTextBlockId = null;
+    let currentReasoningBlockId = null;
+    let blockCounter = 0;
+    const emittedSourceUrls = /* @__PURE__ */ new Set();
+    let lastCodeExecutionToolCallId;
    return {
      stream: response.pipeThrough(
        new TransformStream({
@@ -570,7 +797,10 @@ var GoogleGenerativeAILanguageModel = class {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
@@ -580,16 +810,99 @@ var GoogleGenerativeAILanguageModel = class {
            if (usageMetadata != null) {
              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
            }
-            const candidate = (…
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
            if (candidate == null) {
              return;
            }
            const content = candidate.content;
+            const sources = extractSources({
+              groundingMetadata: candidate.groundingMetadata,
+              generateId: generateId2
+            });
+            if (sources != null) {
+              for (const source of sources) {
+                if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+                  emittedSourceUrls.add(source.url);
+                  controller.enqueue(source);
+                }
+              }
+            }
            if (content != null) {
-              const …
-              …
-              …
+              const parts = (_g = content.parts) != null ? _g : [];
+              for (const part of parts) {
+                if ("executableCode" in part && ((_h = part.executableCode) == null ? void 0 : _h.code)) {
+                  const toolCallId = generateId2();
+                  lastCodeExecutionToolCallId = toolCallId;
+                  controller.enqueue({
+                    type: "tool-call",
+                    toolCallId,
+                    toolName: "code_execution",
+                    input: JSON.stringify(part.executableCode),
+                    providerExecuted: true
+                  });
+                  hasToolCalls = true;
+                } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+                  const toolCallId = lastCodeExecutionToolCallId;
+                  if (toolCallId) {
+                    controller.enqueue({
+                      type: "tool-result",
+                      toolCallId,
+                      toolName: "code_execution",
+                      result: {
+                        outcome: part.codeExecutionResult.outcome,
+                        output: part.codeExecutionResult.output
+                      },
+                      providerExecuted: true
+                    });
+                    lastCodeExecutionToolCallId = void 0;
+                  }
+                } else if ("text" in part && part.text != null && part.text.length > 0) {
+                  if (part.thought === true) {
+                    if (currentTextBlockId !== null) {
+                      controller.enqueue({
+                        type: "text-end",
+                        id: currentTextBlockId
+                      });
+                      currentTextBlockId = null;
+                    }
+                    if (currentReasoningBlockId === null) {
+                      currentReasoningBlockId = String(blockCounter++);
+                      controller.enqueue({
+                        type: "reasoning-start",
+                        id: currentReasoningBlockId
+                      });
+                    }
+                    controller.enqueue({
+                      type: "reasoning-delta",
+                      id: currentReasoningBlockId,
+                      delta: part.text
+                    });
+                  } else {
+                    if (currentReasoningBlockId !== null) {
+                      controller.enqueue({
+                        type: "reasoning-end",
+                        id: currentReasoningBlockId
+                      });
+                      currentReasoningBlockId = null;
+                    }
+                    if (currentTextBlockId === null) {
+                      currentTextBlockId = String(blockCounter++);
+                      controller.enqueue({
+                        type: "text-start",
+                        id: currentTextBlockId
+                      });
+                    }
+                    controller.enqueue({
+                      type: "text-delta",
+                      id: currentTextBlockId,
+                      delta: part.text
+                    });
+                  }
+                }
              }
              const inlineDataParts = getInlineDataParts(content.parts);
              if (inlineDataParts != null) {
@@ -603,23 +916,29 @@ var GoogleGenerativeAILanguageModel = class {
              }
              const toolCallDeltas = getToolCallsFromParts({
                parts: content.parts,
-                generateId
+                generateId: generateId2
              });
              if (toolCallDeltas != null) {
                for (const toolCall of toolCallDeltas) {
                  controller.enqueue({
-                    type: "tool-…
-                    …
-                    …
-                    …
-                    …
+                    type: "tool-input-start",
+                    id: toolCall.toolCallId,
+                    toolName: toolCall.toolName
+                  });
+                  controller.enqueue({
+                    type: "tool-input-delta",
+                    id: toolCall.toolCallId,
+                    delta: toolCall.args
+                  });
+                  controller.enqueue({
+                    type: "tool-input-end",
+                    id: toolCall.toolCallId
                  });
                  controller.enqueue({
                    type: "tool-call",
-                    toolCallType: "function",
                    toolCallId: toolCall.toolCallId,
                    toolName: toolCall.toolName,
-                    …
+                    input: toolCall.args
                  });
                  hasToolCalls = true;
                }
@@ -630,22 +949,31 @@ var GoogleGenerativeAILanguageModel = class {
                finishReason: candidate.finishReason,
                hasToolCalls
              });
-              const sources = (_d = extractSources({
-                groundingMetadata: candidate.groundingMetadata,
-                generateId
-              })) != null ? _d : [];
-              for (const source of sources) {
-                controller.enqueue(source);
-              }
              providerMetadata = {
                google: {
-                  groundingMetadata: (…
-                  …
+                  groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+                  urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
+                  safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
                }
              };
+              if (usageMetadata != null) {
+                providerMetadata.google.usageMetadata = usageMetadata;
+              }
            }
          },
          flush(controller) {
+            if (currentTextBlockId !== null) {
+              controller.enqueue({
+                type: "text-end",
+                id: currentTextBlockId
+              });
+            }
+            if (currentReasoningBlockId !== null) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: currentReasoningBlockId
+              });
+            }
            controller.enqueue({
              type: "finish",
              finishReason,
@@ -662,26 +990,18 @@ var GoogleGenerativeAILanguageModel = class {
 };
 function getToolCallsFromParts({
   parts,
-  generateId
+  generateId: generateId2
 }) {
   const functionCallParts = parts == null ? void 0 : parts.filter(
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
     type: "tool-call",
-    …
-    toolCallId: generateId(),
+    toolCallId: generateId2(),
     toolName: part.functionCall.name,
     args: JSON.stringify(part.functionCall.args)
   }));
 }
-function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-  return textParts == null || textParts.length === 0 ? void 0 : {
-    type: "text",
-    text: textParts.map((part) => part.text).join("")
-  };
-}
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
     (part) => "inlineData" in part
@@ -689,7 +1009,7 @@ function getInlineDataParts(parts) {
 }
 function extractSources({
   groundingMetadata,
-  generateId
+  generateId: generateId2
 }) {
   var _a;
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -697,108 +1017,126 @@ function extractSources({
   ).map((chunk) => ({
     type: "source",
     sourceType: "url",
-    id: …
+    id: generateId2(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema = …
-…
-…
-…
-…
-…
-…
-…
-      functionCall: import_zod2.z.object({
-        name: import_zod2.z.string(),
-        args: import_zod2.z.unknown()
+var contentSchema = import_v45.z.object({
+  parts: import_v45.z.array(
+    import_v45.z.union([
+      // note: order matters since text can be fully empty
+      import_v45.z.object({
+        functionCall: import_v45.z.object({
+          name: import_v45.z.string(),
+          args: import_v45.z.unknown()
        })
      }),
-      …
-      inlineData: …
-        mimeType: …
-        data: …
+      import_v45.z.object({
+        inlineData: import_v45.z.object({
+          mimeType: import_v45.z.string(),
+          data: import_v45.z.string()
        })
+      }),
+      import_v45.z.object({
+        executableCode: import_v45.z.object({
+          language: import_v45.z.string(),
+          code: import_v45.z.string()
+        }).nullish(),
+        codeExecutionResult: import_v45.z.object({
+          outcome: import_v45.z.string(),
+          output: import_v45.z.string()
+        }).nullish(),
+        text: import_v45.z.string().nullish(),
+        thought: import_v45.z.boolean().nullish()
      })
    ])
  ).nullish()
 });
-var …
-…
-…
-…
-…
-…
-…
-  searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
-  groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
-  groundingSupports: import_zod2.z.array(
-    import_zod2.z.object({
-      segment: import_zod2.z.object({
-        startIndex: import_zod2.z.number().nullish(),
-        endIndex: import_zod2.z.number().nullish(),
-        text: import_zod2.z.string().nullish()
-      }),
-      segment_text: import_zod2.z.string().nullish(),
-      groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
-    })
-  ).nullish(),
-  retrievalMetadata: import_zod2.z.union([
-    import_zod2.z.object({
-      webDynamicRetrievalScore: import_zod2.z.number()
-    }),
-    import_zod2.z.object({})
-  ]).nullish()
+var safetyRatingSchema = import_v45.z.object({
+  category: import_v45.z.string().nullish(),
+  probability: import_v45.z.string().nullish(),
+  probabilityScore: import_v45.z.number().nullish(),
+  severity: import_v45.z.string().nullish(),
+  severityScore: import_v45.z.number().nullish(),
+  blocked: import_v45.z.boolean().nullish()
 });
-var …
-…
-…
-…
-…
-…
-  blocked: import_zod2.z.boolean().nullish()
+var usageSchema = import_v45.z.object({
+  cachedContentTokenCount: import_v45.z.number().nullish(),
+  thoughtsTokenCount: import_v45.z.number().nullish(),
+  promptTokenCount: import_v45.z.number().nullish(),
+  candidatesTokenCount: import_v45.z.number().nullish(),
+  totalTokenCount: import_v45.z.number().nullish()
 });
-var responseSchema = …
-  candidates: …
-…
-      content: contentSchema.nullish().or(…
-      finishReason: …
-      safetyRatings: …
-      groundingMetadata: groundingMetadataSchema.nullish()
+var responseSchema = import_v45.z.object({
+  candidates: import_v45.z.array(
+    import_v45.z.object({
+      content: contentSchema.nullish().or(import_v45.z.object({}).strict()),
+      finishReason: import_v45.z.string().nullish(),
+      safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+      groundingMetadata: groundingMetadataSchema.nullish(),
+      urlContextMetadata: urlContextMetadataSchema.nullish()
    })
  ),
-  usageMetadata: …
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema = …
-  candidates: …
-…
+var chunkSchema = import_v45.z.object({
+  candidates: import_v45.z.array(
+    import_v45.z.object({
      content: contentSchema.nullish(),
-      finishReason: …
-      safetyRatings: …
-      groundingMetadata: groundingMetadataSchema.nullish()
+      finishReason: import_v45.z.string().nullish(),
+      safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+      groundingMetadata: groundingMetadataSchema.nullish(),
+      urlContextMetadata: urlContextMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata: …
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-…
-…
+
+// src/tool/code-execution.ts
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var import_v46 = require("zod/v4");
+var codeExecution = (0, import_provider_utils6.createProviderDefinedToolFactoryWithOutputSchema)({
+  id: "google.code_execution",
+  name: "code_execution",
+  inputSchema: import_v46.z.object({
+    language: import_v46.z.string().describe("The programming language of the code."),
+    code: import_v46.z.string().describe("The code to be executed.")
+  }),
+  outputSchema: import_v46.z.object({
+    outcome: import_v46.z.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
+    output: import_v46.z.string().describe("The output from the code execution.")
+  })
 });
+
+// src/google-tools.ts
+var googleTools = {
+  /**
+   * Creates a Google search tool that gives Google direct access to real-time web content.
+   * Must have name "google_search".
+   */
+  googleSearch,
+  /**
+   * Creates a URL context tool that gives Google direct access to real-time web content.
+   * Must have name "url_context".
+   */
+  urlContext,
+  /**
+   * A tool that enables the model to generate and run Python code.
+   * Must have name "code_execution".
+   *
+   * @note Ensure the selected model supports Code Execution.
+   * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
+   *
+   * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
+   * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
+   */
+  codeExecution
+};
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   GoogleGenerativeAILanguageModel,
-  …
+  googleTools,
   safetyRatingSchema
 });
 //# sourceMappingURL=index.js.map
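One behavioral note from the hunks above that is easy to miss: `convertToGoogleGenerativeAIMessages` now takes an `options` argument, and when `isGemmaModel` is true (model ids starting with `gemma-`) it suppresses `systemInstruction` and folds the system text into the first user message instead. A rough illustration of that branch is sketched below; the prompt shape is assumed to follow the `@ai-sdk/provider` prompt format and is not part of this diff.

// Illustration only (not from the package): what the Gemma branch produces, per the diff above.
const prompt = [
  { role: 'system', content: 'You are terse.' },
  { role: 'user', content: [{ type: 'text', text: 'Hi!' }] },
];
// convertToGoogleGenerativeAIMessages(prompt, { isGemmaModel: true }) returns roughly:
// {
//   systemInstruction: undefined, // suppressed for Gemma models
//   contents: [
//     // system text is prepended as an extra part on the first user message
//     { role: 'user', parts: [{ text: 'You are terse.\n\n' }, { text: 'Hi!' }] },
//   ],
// }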