@ai-sdk/google 1.2.18 → 2.0.0-alpha.2
This diff compares the contents of the two package versions as published to their public registries and is provided for informational purposes only.
- package/CHANGELOG.md +176 -34
- package/dist/index.d.mts +102 -91
- package/dist/index.d.ts +102 -91
- package/dist/index.js +449 -394
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +454 -394
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +13 -97
- package/{internal/dist → dist/internal}/index.d.ts +13 -97
- package/{internal/dist → dist/internal}/index.js +301 -289
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +304 -290
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
```diff
@@ -7,11 +7,11 @@ import {
   postJsonToApi,
   resolve
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z3 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
@@ -110,9 +110,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import {
+import {
+  convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
```
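With the alpha, `convertJSONSchemaToOpenAPISchema` also returns `undefined` when the incoming JSON schema is `null` or `undefined`, not only when it is an empty object schema, and the message converter now pulls `convertToBase64` from `@ai-sdk/provider-utils`. A minimal TypeScript sketch of the tightened guard; the helper is internal to the bundle, and `isEmptyObjectSchema` below is a simplified stand-in, not the shipped implementation:

```ts
// Simplified stand-in for the internal empty-object-schema check.
function isEmptyObjectSchema(schema: unknown): boolean {
  return (
    typeof schema === "object" &&
    schema !== null &&
    (schema as { type?: unknown }).type === "object" &&
    Object.keys((schema as { properties?: object }).properties ?? {}).length === 0
  );
}

// 2.0.0-alpha behavior: a nullish schema short-circuits before the empty-object check.
function toOpenAPISchema(schema: unknown): unknown {
  if (schema == null || isEmptyObjectSchema(schema)) {
    return undefined;
  }
  return schema; // remaining conversion elided
}
```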
```diff
@@ -136,33 +137,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           parts.push({ text: part.text });
           break;
         }
-        case "image": {
-          parts.push(
-            part.image instanceof URL ? {
-              fileData: {
-                mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                fileUri: part.image.toString()
-              }
-            } : {
-              inlineData: {
-                mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                data: convertUint8ArrayToBase64(part.image)
-              }
-            }
-          );
-          break;
-        }
         case "file": {
+          const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
           parts.push(
             part.data instanceof URL ? {
               fileData: {
-                mimeType:
+                mimeType: mediaType,
                 fileUri: part.data.toString()
               }
             } : {
               inlineData: {
-                mimeType:
-                data: part.data
+                mimeType: mediaType,
+                data: convertToBase64(part.data)
               }
             }
           );
@@ -183,7 +169,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           return part.text.length === 0 ? void 0 : { text: part.text };
         }
         case "file": {
-          if (part.
+          if (part.mediaType !== "image/png") {
             throw new UnsupportedFunctionalityError({
               functionality: "Only PNG images are supported in assistant messages"
             });
@@ -195,8 +181,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           }
           return {
             inlineData: {
-              mimeType: part.
-              data: part.data
+              mimeType: part.mediaType,
+              data: convertToBase64(part.data)
             }
           };
         }
```
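The prompt converter drops the dedicated `image` part branch: user content now arrives as `file` parts carrying a `mediaType`, `image/*` is normalized to `image/jpeg`, and inline data is encoded with `convertToBase64`; assistant file parts remain restricted to PNG. A small sketch of the new mapping, with simplified stand-in types for the v2 file part and for `convertToBase64`:

```ts
// Simplified stand-in for the v2 prompt file part.
type FilePart = { type: "file"; mediaType: string; data: URL | Uint8Array | string };

function toGooglePart(
  part: FilePart,
  toBase64: (data: Uint8Array | string) => string, // stands in for convertToBase64
) {
  // "image/*" is normalized to "image/jpeg", as in the new converter.
  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  return part.data instanceof URL
    ? { fileData: { mimeType: mediaType, fileUri: part.data.toString() } }
    : { inlineData: { mimeType: mediaType, data: toBase64(part.data) } };
}
```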
```diff
@@ -257,20 +243,112 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-options.ts
+import { z as z2 } from "zod";
+var dynamicRetrievalConfig = z2.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z2.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z2.number().optional()
+});
+var googleGenerativeAIProviderOptions = z2.object({
+  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z2.object({
+    thinkingBudget: z2.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+  */
+  cachedContent: z2.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z2.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+  */
+  safetySettings: z2.array(
+    z2.object({
+      category: z2.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z2.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z2.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z2.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+  */
+  useSearchGrounding: z2.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+  */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(
-
-
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
```
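The new `googleGenerativeAIProviderOptions` schema moves what were previously constructor-level model settings (safety settings, cached content, search grounding, structured outputs, audio timestamps, thinking config) into per-call provider options. A hedged usage sketch, assuming the `ai` package's `generateText` forwards `providerOptions` to the provider as in the v5 alphas:

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

// The keys under providerOptions.google mirror the fields of the schema added above.
const { text } = await generateText({
  model: google("gemini-2.0-flash"),
  prompt: "Summarize the latest grounding docs.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      safetySettings: [
        { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
      ],
      thinkingConfig: { thinkingBudget: 1024 },
    },
  },
});

console.log(text);
```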
```diff
@@ -286,12 +364,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -333,7 +410,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
```
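`prepareTools` now receives one options object (`{ tools, toolChoice, useSearchGrounding, dynamicRetrievalConfig, modelId }`) instead of reading `mode`, and a missing tool description falls back to an empty string. A tiny sketch of that declaration mapping, with a simplified stand-in tool type:

```ts
// Simplified stand-in for a function tool passed to the provider.
interface FunctionTool {
  name: string;
  description?: string;
  parameters: unknown;
}

function toFunctionDeclaration(tool: FunctionTool) {
  return {
    name: tool.name,
    description: tool.description ?? "", // new fallback instead of leaving it undefined
    parameters: tool.parameters, // the bundle runs this through convertJSONSchemaToOpenAPISchema
  };
}
```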
```diff
@@ -368,25 +445,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
```
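The language model class loses its `settings` argument and the v1-era getters (`supportsStructuredOutputs`, `defaultObjectGenerationMode`, `supportsImageUrls`), reports `specificationVersion: "v2"`, and resolves `supportedUrls` from the provider config. A structural sketch with an abbreviated config type; this is not the exported class:

```ts
// Abbreviated stand-in for the provider config passed to the model.
interface Config {
  provider: string;
  generateId: () => string;
  supportedUrls?: () => Record<string, RegExp[]>;
}

class ModelShape {
  readonly specificationVersion = "v2";
  constructor(readonly modelId: string, private config: Config) {}

  get provider() {
    return this.config.provider;
  }

  // Delegated to the config instead of being a hard-coded class property.
  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {};
  }
}
```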
```diff
@@ -395,118 +468,66 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b
-    const type = mode.type;
+    var _a, _b;
     const warnings = [];
-    const googleOptions = parseProviderOptions({
+    const googleOptions = await parseProviderOptions({
       provider: "google",
-      providerOptions
-      schema:
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
-      warnings.push({
-        type: "other",
-        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
-      });
-    }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          contents,
-          systemInstruction,
-          safetySettings: this.settings.safetySettings,
-          cachedContent: this.settings.cachedContent
-        },
-        warnings
-      };
-    }
-      case "object-tool": {
-        return {
-          args: {
-            generationConfig,
-            contents,
-            tools: {
-              functionDeclarations: [
-                {
-                  name: mode.tool.name,
-                  description: (_c = mode.tool.description) != null ? _c : "",
-                  parameters: convertJSONSchemaToOpenAPISchema(
-                    mode.tool.parameters
-                  )
-                }
-              ]
-            },
-            toolConfig: { functionCallingConfig: { mode: "ANY" } },
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-
-
-
-
-
-
-
-
-
-
-
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = combineHeaders(
```
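`getArgs` now builds `generationConfig` from the standard call options plus the parsed provider options, so the structured-output escape hatch is the `structuredOutputs` provider option (default `true`) instead of a model setting. A hedged sketch, assuming `generateObject` from the `ai` package forwards `providerOptions` as in the v5 alphas:

```ts
import { google } from "@ai-sdk/google";
import { generateObject } from "ai";
import { z } from "zod";

// JSON response format still sets responseMimeType/responseSchema; disabling
// structuredOutputs skips responseSchema when the JSON schema uses features
// that Google GenAI's OpenAPI subset does not support.
const { object } = await generateObject({
  model: google("gemini-2.0-flash"),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: "Describe this package release.",
  providerOptions: {
    google: {
      structuredOutputs: false,
    },
  },
});

console.log(object.title);
```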
```diff
@@ -528,45 +549,62 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const
-    const
-
-
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
       generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     const usageMetadata = response.usageMetadata;
     return {
-
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls:
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-
-
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
       warnings,
       providerMetadata: {
         google: {
-          groundingMetadata: (
-          safetyRatings: (
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
         }
       },
-
-
-
-
-
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
     };
   }
   async doStream(options) {
```
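`doGenerate` now returns one ordered `content` array (text, tool calls, inline files, and grounding sources) plus extended usage, replacing the separate `text`, `reasoning`, `files`, and `toolCalls` fields. A shape sketch with the part fields taken from the diff; the union is illustrative and not an exported type:

```ts
// Illustrative union of the parts pushed into `content` above.
type ContentPart =
  | { type: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string;
    }
  | { type: "file"; data: string; mediaType: string }
  | { type: "source"; sourceType: "url"; id: string; url: string; title: string };

function summarize(content: ContentPart[]) {
  let text = "";
  let toolCallCount = 0;
  for (const part of content) {
    if (part.type === "text") text += part.text;
    if (part.type === "tool-call") toolCallCount += 1;
  }
  return { text, toolCallCount };
}
```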
```diff
@@ -587,11 +625,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
     const generateId = this.config.generateId;
@@ -599,8 +637,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -608,12 +649,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage =
-
-
-
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
```
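Streaming now begins with a `stream-start` part that carries the warnings, and usage is accumulated in a mutable object that also tracks reasoning and cached-input tokens. A sketch of that accumulator using the field names from the diff:

```ts
// Field names follow the diff; this mirrors the accumulator, it is not an exported type.
interface StreamUsage {
  inputTokens: number | undefined;
  outputTokens: number | undefined;
  totalTokens: number | undefined;
  reasoningTokens?: number;
  cachedInputTokens?: number;
}

function applyUsageMetadata(
  usage: StreamUsage,
  meta: {
    promptTokenCount?: number | null;
    candidatesTokenCount?: number | null;
    totalTokenCount?: number | null;
    thoughtsTokenCount?: number | null;
    cachedContentTokenCount?: number | null;
  },
): void {
  usage.inputTokens = meta.promptTokenCount ?? undefined;
  usage.outputTokens = meta.candidatesTokenCount ?? undefined;
  usage.totalTokens = meta.totalTokenCount ?? undefined;
  usage.reasoningTokens = meta.thoughtsTokenCount ?? undefined;
  usage.cachedInputTokens = meta.cachedContentTokenCount ?? undefined;
}
```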
```diff
@@ -621,28 +663,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue(
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -676,17 +704,17 @@ var GoogleGenerativeAILanguageModel = class {
              finishReason: candidate.finishReason,
              hasToolCalls
            });
-            const sources = (
+            const sources = (_g = extractSources({
              groundingMetadata: candidate.groundingMetadata,
              generateId
-            })) != null ?
+            })) != null ? _g : [];
            for (const source of sources) {
-              controller.enqueue(
+              controller.enqueue(source);
            }
            providerMetadata = {
              google: {
-                groundingMetadata: (
-                safetyRatings: (
+                groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
              }
            };
          }
@@ -701,9 +729,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
```
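Text, inline-data, and source chunks are now enqueued as self-describing stream parts, and the old `rawResponse`/`warnings` fields of the stream result give way to `response: { headers }` (warnings travel in the `stream-start` part). An abbreviated sketch of the part shapes a consumer of this stream would see; the union is illustrative and omits finish and tool-call parts:

```ts
// Illustrative, abbreviated union of stream parts seen in the diff.
type StreamPart =
  | { type: "stream-start"; warnings: unknown[] }
  | { type: "text"; text: string }
  | { type: "file"; mediaType: string; data: string }
  | { type: "source"; sourceType: "url"; id: string; url: string; title: string }
  | { type: "error"; error: unknown };

function describe(part: StreamPart): string {
  switch (part.type) {
    case "stream-start":
      return `warnings: ${part.warnings.length}`;
    case "text":
      return part.text;
    case "file":
      return `inline ${part.mediaType}`;
    case "source":
      return part.url;
    case "error":
      return String(part.error);
  }
}
```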
```diff
@@ -716,6 +742,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId(),
     toolName: part.functionCall.name,
@@ -723,16 +750,11 @@ function getToolCallsFromParts({
   }));
 }
 function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-
-
-
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
+  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
```
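`getTextFromParts` now folds every text part of a chunk into a single `{ type: "text" }` part instead of emitting text deltas, and the separate reasoning helper is gone. A minimal sketch of that aggregation:

```ts
// Joins all text-bearing parts of a chunk into one text part, as the new helper does.
function joinTextParts(parts: Array<{ text?: string }>) {
  const textParts = parts.filter(
    (p): p is { text: string } => typeof p.text === "string",
  );
  return textParts.length === 0
    ? undefined
    : { type: "text" as const, text: textParts.map((p) => p.text).join("") };
}
```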
```diff
@@ -747,109 +769,101 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-      text:
-      thought: z2.boolean().nullish()
+var contentSchema = z3.object({
+  role: z3.string(),
+  parts: z3.array(
+    z3.union([
+      z3.object({
+        text: z3.string()
       }),
-
-        functionCall:
-          name:
-          args:
+      z3.object({
+        functionCall: z3.object({
+          name: z3.string(),
+          args: z3.unknown()
         })
       }),
-
-        inlineData:
-          mimeType:
-          data:
+      z3.object({
+        inlineData: z3.object({
+          mimeType: z3.string(),
+          data: z3.string()
        })
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = z3.object({
+  web: z3.object({ uri: z3.string(), title: z3.string() }).nullish(),
+  retrievedContext: z3.object({ uri: z3.string(), title: z3.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-      segment:
-        startIndex:
-        endIndex:
-        text:
+var groundingMetadataSchema = z3.object({
+  webSearchQueries: z3.array(z3.string()).nullish(),
+  retrievalQueries: z3.array(z3.string()).nullish(),
+  searchEntryPoint: z3.object({ renderedContent: z3.string() }).nullish(),
+  groundingChunks: z3.array(groundingChunkSchema).nullish(),
+  groundingSupports: z3.array(
+    z3.object({
+      segment: z3.object({
+        startIndex: z3.number().nullish(),
+        endIndex: z3.number().nullish(),
+        text: z3.string().nullish()
      }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: z3.string().nullish(),
+      groundingChunkIndices: z3.array(z3.number()).nullish(),
+      supportChunkIndices: z3.array(z3.number()).nullish(),
+      confidenceScores: z3.array(z3.number()).nullish(),
+      confidenceScore: z3.array(z3.number()).nullish()
    })
  ).nullish(),
-  retrievalMetadata:
-
-      webDynamicRetrievalScore:
+  retrievalMetadata: z3.union([
+    z3.object({
+      webDynamicRetrievalScore: z3.number()
    }),
-
+    z3.object({})
  ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = z3.object({
+  category: z3.string().nullish(),
+  probability: z3.string().nullish(),
+  probabilityScore: z3.number().nullish(),
+  severity: z3.string().nullish(),
+  severityScore: z3.number().nullish(),
+  blocked: z3.boolean().nullish()
 });
-var
-
-
-
-
-
+var usageSchema = z3.object({
+  cachedContentTokenCount: z3.number().nullish(),
+  thoughtsTokenCount: z3.number().nullish(),
+  promptTokenCount: z3.number().nullish(),
+  candidatesTokenCount: z3.number().nullish(),
+  totalTokenCount: z3.number().nullish()
+});
+var responseSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
+      content: contentSchema.nullish().or(z3.object({}).strict()),
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata:
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
       content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata:
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z2.object({
-    thinkingBudget: z2.number().nullish(),
-    includeThoughts: z2.boolean().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
 export {
   GoogleGenerativeAILanguageModel,
```
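The response and chunk schemas now share one `usageSchema` (under the renumbered `z3` alias) that also captures thoughts and cached-content token counts. A zod mirror of that shared schema with field names copied from the diff; the bundled schema itself is not exported:

```ts
import { z } from "zod";

// Mirror of the shared usageSchema added in 2.0.0-alpha; both responseSchema and
// chunkSchema reuse it via `usageMetadata: usageSchema.nullish()`.
const usageSchema = z.object({
  cachedContentTokenCount: z.number().nullish(),
  thoughtsTokenCount: z.number().nullish(),
  promptTokenCount: z.number().nullish(),
  candidatesTokenCount: z.number().nullish(),
  totalTokenCount: z.number().nullish(),
});

type UsageMetadata = z.infer<typeof usageSchema>;

const example: UsageMetadata = usageSchema.parse({
  promptTokenCount: 12,
  candidatesTokenCount: 34,
  totalTokenCount: 46,
});
```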