@ai-sdk/google 1.2.19 → 2.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +231 -36
- package/dist/index.d.mts +107 -91
- package/dist/index.d.ts +107 -91
- package/dist/index.js +457 -390
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +462 -390
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +13 -97
- package/{internal/dist → dist/internal}/index.d.ts +13 -97
- package/{internal/dist → dist/internal}/index.js +308 -285
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +311 -286
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
@@ -7,11 +7,11 @@ import {
 postJsonToApi,
 resolve
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z3 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-if (isEmptyObjectSchema(jsonSchema)) {
+if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
 return void 0;
 }
 if (typeof jsonSchema === "boolean") {
@@ -110,9 +110,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
 UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import {
+import {
+convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-var _a, _b;
 const systemInstructionParts = [];
 const contents = [];
 let systemMessagesAllowed = true;
@@ -136,33 +137,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 parts.push({ text: part.text });
 break;
 }
-case "image": {
-parts.push(
-part.image instanceof URL ? {
-fileData: {
-mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-fileUri: part.image.toString()
-}
-} : {
-inlineData: {
-mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-data: convertUint8ArrayToBase64(part.image)
-}
-}
-);
-break;
-}
 case "file": {
+const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
 parts.push(
 part.data instanceof URL ? {
 fileData: {
-mimeType:
+mimeType: mediaType,
 fileUri: part.data.toString()
 }
 } : {
 inlineData: {
-mimeType:
-data: part.data
+mimeType: mediaType,
+data: convertToBase64(part.data)
 }
 }
 );
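In 2.0.0-alpha, image prompt parts no longer get their own branch in convertToGoogleGenerativeAIMessages: images travel as "file" parts with an explicit mediaType, and the wildcard "image/*" is normalized to "image/jpeg". A minimal sketch of that mapping, paraphrased from the compiled hunk above (FilePart and toBase64 are illustrative stand-ins, not the package's actual types or helpers):

    type FilePart = { mediaType: string; data: URL | Uint8Array };

    function toGooglePart(part: FilePart, toBase64: (data: Uint8Array) => string) {
      // "image/*" is the only wildcard the converter special-cases.
      const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
      return part.data instanceof URL
        ? { fileData: { mimeType: mediaType, fileUri: part.data.toString() } } // URL-backed file
        : { inlineData: { mimeType: mediaType, data: toBase64(part.data) } }; // inline data, base64-encoded
    }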
@@ -183,7 +169,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 return part.text.length === 0 ? void 0 : { text: part.text };
 }
 case "file": {
-if (part.
+if (part.mediaType !== "image/png") {
 throw new UnsupportedFunctionalityError({
 functionality: "Only PNG images are supported in assistant messages"
 });
@@ -195,8 +181,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
 }
 return {
 inlineData: {
-mimeType: part.
-data: part.data
+mimeType: part.mediaType,
+data: convertToBase64(part.data)
 }
 };
 }
@@ -257,20 +243,113 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
 errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-options.ts
+import { z as z2 } from "zod";
+var dynamicRetrievalConfig = z2.object({
+/**
+* The mode of the predictor to be used in dynamic retrieval.
+*/
+mode: z2.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+/**
+* The threshold to be used in dynamic retrieval. If not set, a system default
+* value is used.
+*/
+dynamicThreshold: z2.number().optional()
+});
+var googleGenerativeAIProviderOptions = z2.object({
+responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).optional(),
+thinkingConfig: z2.object({
+thinkingBudget: z2.number().optional(),
+includeThoughts: z2.boolean().optional()
+}).optional(),
+/**
+Optional.
+The name of the cached content used as context to serve the prediction.
+Format: cachedContents/{cachedContent}
+*/
+cachedContent: z2.string().optional(),
+/**
+* Optional. Enable structured output. Default is true.
+*
+* This is useful when the JSON Schema contains elements that are
+* not supported by the OpenAPI schema version that
+* Google Generative AI uses. You can use this to disable
+* structured outputs if you need to.
+*/
+structuredOutputs: z2.boolean().optional(),
+/**
+Optional. A list of unique safety settings for blocking unsafe content.
+*/
+safetySettings: z2.array(
+z2.object({
+category: z2.enum([
+"HARM_CATEGORY_UNSPECIFIED",
+"HARM_CATEGORY_HATE_SPEECH",
+"HARM_CATEGORY_DANGEROUS_CONTENT",
+"HARM_CATEGORY_HARASSMENT",
+"HARM_CATEGORY_SEXUALLY_EXPLICIT",
+"HARM_CATEGORY_CIVIC_INTEGRITY"
+]),
+threshold: z2.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+])
+})
+).optional(),
+threshold: z2.enum([
+"HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+"BLOCK_LOW_AND_ABOVE",
+"BLOCK_MEDIUM_AND_ABOVE",
+"BLOCK_ONLY_HIGH",
+"BLOCK_NONE",
+"OFF"
+]).optional(),
+/**
+* Optional. Enables timestamp understanding for audio-only files.
+*
+* https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+*/
+audioTimestamp: z2.boolean().optional(),
+/**
+Optional. When enabled, the model will use Google search to ground the response.
+
+@see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+*/
+useSearchGrounding: z2.boolean().optional(),
+/**
+Optional. Specifies the dynamic retrieval configuration.
+
+@note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+@see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+*/
+dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 import {
 UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(
-
-
+function prepareTools({
+tools,
+toolChoice,
+useSearchGrounding,
+dynamicRetrievalConfig: dynamicRetrievalConfig2,
+modelId
+}) {
+var _a;
+tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
 const toolWarnings = [];
 const isGemini2 = modelId.includes("gemini-2");
 const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
 if (useSearchGrounding) {
 return {
 tools: isGemini2 ? { googleSearch: {} } : {
-googleSearchRetrieval: !supportsDynamicRetrieval || !
+googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
 },
 toolConfig: void 0,
 toolWarnings
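The new googleGenerativeAIProviderOptions schema moves per-request settings (safetySettings, cachedContent, structuredOutputs, audioTimestamp, useSearchGrounding, dynamicRetrievalConfig, thinkingConfig, responseModalities) out of the model constructor and into provider options that are validated at call time. A hedged usage sketch, assuming the AI SDK v5-alpha call shape where per-call provider options are nested under the "google" key (model id and option values are illustrative):

    import { google } from "@ai-sdk/google";
    import { generateText } from "ai";

    const result = await generateText({
      model: google("gemini-2.0-flash"),
      prompt: "Summarize the latest release notes.",
      providerOptions: {
        google: {
          useSearchGrounding: true,
          thinkingConfig: { thinkingBudget: 1024 },
          safetySettings: [
            { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
          ],
        },
      },
    });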
@@ -286,12 +365,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
 } else {
 functionDeclarations.push({
 name: tool.name,
-description: (
+description: (_a = tool.description) != null ? _a : "",
 parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
 });
 }
 }
-const toolChoice = mode.toolChoice;
 if (toolChoice == null) {
 return {
 tools: { functionDeclarations },
@@ -333,7 +411,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
 default: {
 const _exhaustiveCheck = type;
 throw new UnsupportedFunctionalityError2({
-functionality: `
+functionality: `tool choice type: ${_exhaustiveCheck}`
 });
 }
 }
@@ -368,25 +446,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-constructor(modelId,
-this.specificationVersion = "
-this.defaultObjectGenerationMode = "json";
-this.supportsImageUrls = false;
+constructor(modelId, config) {
+this.specificationVersion = "v2";
 this.modelId = modelId;
-this.settings = settings;
 this.config = config;
 }
-get supportsStructuredOutputs() {
-var _a;
-return (_a = this.settings.structuredOutputs) != null ? _a : true;
-}
 get provider() {
 return this.config.provider;
 }
+get supportedUrls() {
+var _a, _b, _c;
+return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+}
 async getArgs({
-mode,
 prompt,
-
+maxOutputTokens,
 temperature,
 topP,
 topK,
@@ -395,15 +469,16 @@ var GoogleGenerativeAILanguageModel = class {
 stopSequences,
 responseFormat,
 seed,
-
+tools,
+toolChoice,
+providerOptions
 }) {
 var _a, _b, _c;
-const type = mode.type;
 const warnings = [];
-const googleOptions = parseProviderOptions({
+const googleOptions = await parseProviderOptions({
 provider: "google",
-providerOptions
-schema:
+providerOptions,
+schema: googleGenerativeAIProviderOptions
 });
 if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
 warnings.push({
@@ -411,102 +486,55 @@ var GoogleGenerativeAILanguageModel = class {
 message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
 });
 }
-const generationConfig = {
-// standardized settings:
-maxOutputTokens: maxTokens,
-temperature,
-topK,
-topP,
-frequencyPenalty,
-presencePenalty,
-stopSequences,
-seed,
-// response format:
-responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-// so this is needed as an escape hatch:
-this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-...this.settings.audioTimestamp && {
-audioTimestamp: this.settings.audioTimestamp
-},
-// provider options:
-responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-};
 const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-contents,
-systemInstruction,
-safetySettings: this.settings.safetySettings,
-cachedContent: this.settings.cachedContent
-},
-warnings
-};
-}
-case "object-tool": {
-return {
-args: {
-generationConfig,
-contents,
-tools: {
-functionDeclarations: [
-{
-name: mode.tool.name,
-description: (_c = mode.tool.description) != null ? _c : "",
-parameters: convertJSONSchemaToOpenAPISchema(
-mode.tool.parameters
-)
-}
-]
-},
-toolConfig: { functionCallingConfig: { mode: "ANY" } },
-safetySettings: this.settings.safetySettings,
-cachedContent: this.settings.cachedContent
+const {
+tools: googleTools,
+toolConfig: googleToolConfig,
+toolWarnings
+} = prepareTools({
+tools,
+toolChoice,
+useSearchGrounding: (_b = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _b : false,
+dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+modelId: this.modelId
+});
+return {
+args: {
+generationConfig: {
+// standardized settings:
+maxOutputTokens,
+temperature,
+topK,
+topP,
+frequencyPenalty,
+presencePenalty,
+stopSequences,
+seed,
+// response format:
+responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+// so this is needed as an escape hatch:
+// TODO convert into provider option
+((_c = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _c : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+audioTimestamp: googleOptions.audioTimestamp
 },
-
-
-
-
-
-
-
-
-
-
-
+// provider options:
+responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+},
+contents,
+systemInstruction,
+safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+tools: googleTools,
+toolConfig: googleToolConfig,
+cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+},
+warnings: [...warnings, ...toolWarnings]
+};
 }
 async doGenerate(options) {
-var _a, _b, _c, _d, _e;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i;
 const { args, warnings } = await this.getArgs(options);
 const body = JSON.stringify(args);
 const mergedHeaders = combineHeaders(
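getArgs no longer switches on a mode object: it receives tools, toolChoice, and providerOptions directly, inlines generationConfig into the returned args, and reads safetySettings and cachedContent from the parsed provider options. Roughly, the object it now returns has the shape below (an illustrative TypeScript paraphrase of the hunk above; GoogleRequestArgs is not an exported type, and optionality marks are interpretive):

    interface GoogleRequestArgs {
      generationConfig: {
        maxOutputTokens?: number;
        temperature?: number;
        topK?: number;
        topP?: number;
        frequencyPenalty?: number;
        presencePenalty?: number;
        stopSequences?: string[];
        seed?: number;
        responseMimeType?: "application/json";
        responseSchema?: unknown;
        audioTimestamp?: boolean;
        responseModalities?: Array<"TEXT" | "IMAGE">;
        thinkingConfig?: { thinkingBudget?: number; includeThoughts?: boolean };
      };
      contents: unknown[];
      systemInstruction?: unknown;
      safetySettings?: unknown;
      tools?: unknown;
      toolConfig?: unknown;
      cachedContent?: string;
    }

    // doGenerate/doStream send JSON.stringify(args) as the request body;
    // warnings from getArgs and prepareTools are merged into one array.
    type GetArgsResult = { args: GoogleRequestArgs; warnings: unknown[] };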
@@ -528,45 +556,66 @@ var GoogleGenerativeAILanguageModel = class {
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
-const { contents: rawPrompt, ...rawSettings } = args;
 const candidate = response.candidates[0];
-const
-const
-parts,
-// Use candidateParts
-generateId: this.config.generateId
-});
+const content = [];
+const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
 const usageMetadata = response.usageMetadata;
+for (const part of parts) {
+if ("text" in part && part.text != null && part.text.length > 0) {
+if (part.thought === true) {
+content.push({ type: "reasoning", text: part.text });
+} else {
+content.push({ type: "text", text: part.text });
+}
+} else if ("functionCall" in part) {
+content.push({
+type: "tool-call",
+toolCallType: "function",
+toolCallId: this.config.generateId(),
+toolName: part.functionCall.name,
+args: JSON.stringify(part.functionCall.args)
+});
+} else if ("inlineData" in part) {
+content.push({
+type: "file",
+data: part.inlineData.data,
+mediaType: part.inlineData.mimeType
+});
+}
+}
+const sources = (_b = extractSources({
+groundingMetadata: candidate.groundingMetadata,
+generateId: this.config.generateId
+})) != null ? _b : [];
+for (const source of sources) {
+content.push(source);
+}
 return {
-
-reasoning: getReasoningDetailsFromParts(parts),
-files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-data: part.inlineData.data,
-mimeType: part.inlineData.mimeType
-})),
-toolCalls,
+content,
 finishReason: mapGoogleGenerativeAIFinishReason({
 finishReason: candidate.finishReason,
-hasToolCalls:
+hasToolCalls: content.some((part) => part.type === "tool-call")
 }),
 usage: {
-
-
+inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
 },
-rawCall: { rawPrompt, rawSettings },
-rawResponse: { headers: responseHeaders, body: rawResponse },
 warnings,
 providerMetadata: {
 google: {
-groundingMetadata: (
-safetyRatings: (
+groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
 }
 },
-
-
-
-
-
+request: { body },
+response: {
+// TODO timestamp, model id, id
+headers: responseHeaders,
+body: rawResponse
+}
 };
 }
 async doStream(options) {
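doGenerate no longer returns separate reasoning/files/toolCalls fields; the loop above collects everything into a single ordered content array, and usage gains reasoningTokens and cachedInputTokens. A sketch of the part shapes that loop produces, with a trivial consumer (the ContentPart union is transcribed from the hunk above, not imported from the package):

    type ContentPart =
      | { type: "text"; text: string }
      | { type: "reasoning"; text: string }
      | { type: "tool-call"; toolCallType: "function"; toolCallId: string; toolName: string; args: string }
      | { type: "file"; data: string; mediaType: string }
      | { type: "source"; sourceType: "url"; id: string; url: string; title: string };

    // Concatenate the plain text parts, the way a caller of the model might.
    function collectText(content: ContentPart[]): string {
      return content
        .filter((part): part is Extract<ContentPart, { type: "text" }> => part.type === "text")
        .map((part) => part.text)
        .join("");
    }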
@@ -587,11 +636,11 @@ var GoogleGenerativeAILanguageModel = class {
 abortSignal: options.abortSignal,
 fetch: this.config.fetch
 });
-const { contents: rawPrompt, ...rawSettings } = args;
 let finishReason = "unknown";
-
-
-
+const usage = {
+inputTokens: void 0,
+outputTokens: void 0,
+totalTokens: void 0
 };
 let providerMetadata = void 0;
 const generateId = this.config.generateId;
@@ -599,8 +648,11 @@ var GoogleGenerativeAILanguageModel = class {
 return {
 stream: response.pipeThrough(
 new TransformStream({
+start(controller) {
+controller.enqueue({ type: "stream-start", warnings });
+},
 transform(chunk, controller) {
-var _a, _b, _c, _d, _e, _f;
+var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
 if (!chunk.success) {
 controller.enqueue({ type: "error", error: chunk.error });
 return;
@@ -608,33 +660,26 @@ var GoogleGenerativeAILanguageModel = class {
 const value = chunk.value;
 const usageMetadata = value.usageMetadata;
 if (usageMetadata != null) {
-usage =
-
-
-
+usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
 }
-const candidate = (
+const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
 if (candidate == null) {
 return;
 }
 const content = candidate.content;
 if (content != null) {
-const
-
-
-
-
-
-
-
-content.parts
-);
-if (reasoningDeltaText != null) {
-for (const part of reasoningDeltaText) {
-controller.enqueue({
-type: "reasoning",
-textDelta: part.text
-});
+const parts = (_g = content.parts) != null ? _g : [];
+for (const part of parts) {
+if ("text" in part && part.text != null && part.text.length > 0) {
+if (part.thought === true) {
+controller.enqueue({ type: "reasoning", text: part.text });
+} else {
+controller.enqueue({ type: "text", text: part.text });
+}
 }
 }
 const inlineDataParts = getInlineDataParts(content.parts);
@@ -642,7 +687,7 @@ var GoogleGenerativeAILanguageModel = class {
 for (const part of inlineDataParts) {
 controller.enqueue({
 type: "file",
-
+mediaType: part.inlineData.mimeType,
 data: part.inlineData.data
 });
 }
@@ -676,17 +721,17 @@ var GoogleGenerativeAILanguageModel = class {
 finishReason: candidate.finishReason,
 hasToolCalls
 });
-const sources = (
+const sources = (_h = extractSources({
 groundingMetadata: candidate.groundingMetadata,
 generateId
-})) != null ?
+})) != null ? _h : [];
 for (const source of sources) {
-controller.enqueue(
+controller.enqueue(source);
 }
 providerMetadata = {
 google: {
-groundingMetadata: (
-safetyRatings: (
+groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
 }
 };
 }
@@ -701,9 +746,7 @@ var GoogleGenerativeAILanguageModel = class {
 }
 })
 ),
-
-rawResponse: { headers: responseHeaders },
-warnings,
+response: { headers: responseHeaders },
 request: { body }
 };
 }
@@ -716,24 +759,13 @@ function getToolCallsFromParts({
 (part) => "functionCall" in part
 );
 return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+type: "tool-call",
 toolCallType: "function",
 toolCallId: generateId(),
 toolName: part.functionCall.name,
 args: JSON.stringify(part.functionCall.args)
 }));
 }
-function getTextFromParts(parts) {
-const textParts = parts == null ? void 0 : parts.filter(
-(part) => "text" in part && part.thought !== true
-);
-return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
-}
-function getReasoningDetailsFromParts(parts) {
-const reasoningParts = parts == null ? void 0 : parts.filter(
-(part) => "text" in part && part.thought === true && part.text != null
-);
-return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
-}
 function getInlineDataParts(parts) {
 return parts == null ? void 0 : parts.filter(
 (part) => "inlineData" in part
@@ -747,109 +779,102 @@ function extractSources({
 return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
 (chunk) => chunk.web != null
 ).map((chunk) => ({
+type: "source",
 sourceType: "url",
 id: generateId(),
 url: chunk.web.uri,
 title: chunk.web.title
 }));
 }
-var contentSchema =
-parts:
-
+var contentSchema = z3.object({
+parts: z3.array(
+z3.union([
 // note: order matters since text can be fully empty
-
-functionCall:
-name:
-args:
+z3.object({
+functionCall: z3.object({
+name: z3.string(),
+args: z3.unknown()
 })
 }),
-
-inlineData:
-mimeType:
-data:
+z3.object({
+inlineData: z3.object({
+mimeType: z3.string(),
+data: z3.string()
 })
 }),
-
-text:
-thought:
+z3.object({
+text: z3.string().nullish(),
+thought: z3.boolean().nullish()
 })
 ])
 ).nullish()
 });
-var groundingChunkSchema =
-web:
-retrievedContext:
+var groundingChunkSchema = z3.object({
+web: z3.object({ uri: z3.string(), title: z3.string() }).nullish(),
+retrievedContext: z3.object({ uri: z3.string(), title: z3.string() }).nullish()
 });
-var groundingMetadataSchema =
-webSearchQueries:
-retrievalQueries:
-searchEntryPoint:
-groundingChunks:
-groundingSupports:
-
-segment:
-startIndex:
-endIndex:
-text:
+var groundingMetadataSchema = z3.object({
+webSearchQueries: z3.array(z3.string()).nullish(),
+retrievalQueries: z3.array(z3.string()).nullish(),
+searchEntryPoint: z3.object({ renderedContent: z3.string() }).nullish(),
+groundingChunks: z3.array(groundingChunkSchema).nullish(),
+groundingSupports: z3.array(
+z3.object({
+segment: z3.object({
+startIndex: z3.number().nullish(),
+endIndex: z3.number().nullish(),
+text: z3.string().nullish()
 }),
-segment_text:
-groundingChunkIndices:
-supportChunkIndices:
-confidenceScores:
-confidenceScore:
+segment_text: z3.string().nullish(),
+groundingChunkIndices: z3.array(z3.number()).nullish(),
+supportChunkIndices: z3.array(z3.number()).nullish(),
+confidenceScores: z3.array(z3.number()).nullish(),
+confidenceScore: z3.array(z3.number()).nullish()
 })
 ).nullish(),
-retrievalMetadata:
-
-webDynamicRetrievalScore:
+retrievalMetadata: z3.union([
+z3.object({
+webDynamicRetrievalScore: z3.number()
 }),
-
+z3.object({})
 ]).nullish()
 });
-var safetyRatingSchema =
-category:
-probability:
-probabilityScore:
-severity:
-severityScore:
-blocked:
+var safetyRatingSchema = z3.object({
+category: z3.string().nullish(),
+probability: z3.string().nullish(),
+probabilityScore: z3.number().nullish(),
+severity: z3.string().nullish(),
+severityScore: z3.number().nullish(),
+blocked: z3.boolean().nullish()
 });
-var
-
-
-
-
-
+var usageSchema = z3.object({
+cachedContentTokenCount: z3.number().nullish(),
+thoughtsTokenCount: z3.number().nullish(),
+promptTokenCount: z3.number().nullish(),
+candidatesTokenCount: z3.number().nullish(),
+totalTokenCount: z3.number().nullish()
+});
+var responseSchema = z3.object({
+candidates: z3.array(
+z3.object({
+content: contentSchema.nullish().or(z3.object({}).strict()),
+finishReason: z3.string().nullish(),
+safetyRatings: z3.array(safetyRatingSchema).nullish(),
 groundingMetadata: groundingMetadataSchema.nullish()
 })
 ),
-usageMetadata:
-promptTokenCount: z2.number().nullish(),
-candidatesTokenCount: z2.number().nullish(),
-totalTokenCount: z2.number().nullish()
-}).nullish()
+usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-candidates:
-
+var chunkSchema = z3.object({
+candidates: z3.array(
+z3.object({
 content: contentSchema.nullish(),
-finishReason:
-safetyRatings:
+finishReason: z3.string().nullish(),
+safetyRatings: z3.array(safetyRatingSchema).nullish(),
 groundingMetadata: groundingMetadataSchema.nullish()
 })
 ).nullish(),
-usageMetadata:
-promptTokenCount: z2.number().nullish(),
-candidatesTokenCount: z2.number().nullish(),
-totalTokenCount: z2.number().nullish()
-}).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-thinkingConfig: z2.object({
-thinkingBudget: z2.number().nullish(),
-includeThoughts: z2.boolean().nullish()
-}).nullish()
+usageMetadata: usageSchema.nullish()
 });
 export {
 GoogleGenerativeAILanguageModel,